// SPDX-License-Identifier: GPL-2.0-only /* * Xilinx Axi Ethernet device driver * * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <[email protected]> * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. * Copyright (c) 2010 - 2011 Michal Simek <[email protected]> * Copyright (c) 2010 - 2011 PetaLogix * Copyright (c) 2019 - 2022 Calian Advanced Technologies * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. * * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 * and Spartan6. * * TODO: * - Add Axi Fifo support. * - Factor out Axi DMA code into separate driver. * - Test and fix basic multicast filtering. * - Add support for extended multicast filtering. * - Test basic VLAN support. * - Add support for extended VLAN support. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/math64.h> #include <linux/phy.h> #include <linux/mii.h> #include <linux/ethtool.h> #include "xilinx_axienet.h" /* Descriptors defines for Tx and Rx DMA */ #define TX_BD_NUM_DEFAULT 128 #define RX_BD_NUM_DEFAULT 1024 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) #define TX_BD_NUM_MAX 4096 #define RX_BD_NUM_MAX 4096 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */ #define DRIVER_NAME "xaxienet" #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" #define DRIVER_VERSION "1.00a" #define AXIENET_REGS_N 40 /* Match table for of_platform binding */ static const struct of_device_id axienet_of_match[] = { { .compatible = "xlnx,axi-ethernet-1.00.a", }, { .compatible = "xlnx,axi-ethernet-1.01.a", }, { .compatible = "xlnx,axi-ethernet-2.01.a", }, {}, }; MODULE_DEVICE_TABLE(of, axienet_of_match); /* Option table for setting up Axi Ethernet hardware options */ static struct axienet_option axienet_options[] = { /* Turn on jumbo packet support for both Rx and Tx */ { .opt = XAE_OPTION_JUMBO, .reg = XAE_TC_OFFSET, .m_or = XAE_TC_JUM_MASK, }, { .opt = XAE_OPTION_JUMBO, .reg = XAE_RCW1_OFFSET, .m_or = XAE_RCW1_JUM_MASK, }, { /* Turn on VLAN packet support for both Rx and Tx */ .opt = XAE_OPTION_VLAN, .reg = XAE_TC_OFFSET, .m_or = XAE_TC_VLAN_MASK, }, { .opt = XAE_OPTION_VLAN, .reg = XAE_RCW1_OFFSET, .m_or = XAE_RCW1_VLAN_MASK, }, { /* Turn on FCS stripping on receive packets */ .opt = XAE_OPTION_FCS_STRIP, .reg = XAE_RCW1_OFFSET, .m_or = XAE_RCW1_FCS_MASK, }, { /* Turn on FCS insertion on transmit packets */ .opt = XAE_OPTION_FCS_INSERT, .reg = XAE_TC_OFFSET, .m_or = XAE_TC_FCS_MASK, }, { /* Turn off length/type field checking on receive packets */ .opt = XAE_OPTION_LENTYPE_ERR, .reg = XAE_RCW1_OFFSET, .m_or = XAE_RCW1_LT_DIS_MASK, }, { /* Turn on Rx flow control */ .opt = XAE_OPTION_FLOW_CONTROL, .reg = XAE_FCC_OFFSET, .m_or = XAE_FCC_FCRX_MASK, }, { /* Turn on Tx flow control */ .opt = XAE_OPTION_FLOW_CONTROL, .reg = XAE_FCC_OFFSET, .m_or = XAE_FCC_FCTX_MASK, }, { /* Turn on promiscuous frame filtering */ .opt = XAE_OPTION_PROMISC, .reg = XAE_FMI_OFFSET, .m_or = XAE_FMI_PM_MASK, }, { /* Enable transmitter */ .opt = XAE_OPTION_TXEN, .reg = XAE_TC_OFFSET, .m_or = XAE_TC_TX_MASK, }, { /* Enable receiver */ .opt = XAE_OPTION_RXEN, .reg = XAE_RCW1_OFFSET, .m_or = XAE_RCW1_RX_MASK, }, {} }; /** * axienet_dma_in32 - Memory 
mapped Axi DMA register read * @lp: Pointer to axienet local structure * @reg: Address offset from the base address of the Axi DMA core * * Return: The contents of the Axi DMA register * * This function returns the contents of the corresponding Axi DMA register. */ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) { return ioread32(lp->dma_regs + reg); } static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, struct axidma_bd *desc) { desc->phys = lower_32_bits(addr); if (lp->features & XAE_FEATURE_DMA_64BIT) desc->phys_msb = upper_32_bits(addr); } static dma_addr_t desc_get_phys_addr(struct axienet_local *lp, struct axidma_bd *desc) { dma_addr_t ret = desc->phys; if (lp->features & XAE_FEATURE_DMA_64BIT) ret |= ((dma_addr_t)desc->phys_msb << 16) << 16; return ret; } /** * axienet_dma_bd_release - Release buffer descriptor rings * @ndev: Pointer to the net_device structure * * This function is used to release the descriptors allocated in * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet * driver stop api is called. */ static void axienet_dma_bd_release(struct net_device *ndev) { int i; struct axienet_local *lp = netdev_priv(ndev); /* If we end up here, tx_bd_v must have been DMA allocated. */ dma_free_coherent(lp->dev, sizeof(*lp->tx_bd_v) * lp->tx_bd_num, lp->tx_bd_v, lp->tx_bd_p); if (!lp->rx_bd_v) return; for (i = 0; i < lp->rx_bd_num; i++) { dma_addr_t phys; /* A NULL skb means this descriptor has not been initialised * at all. */ if (!lp->rx_bd_v[i].skb) break; dev_kfree_skb(lp->rx_bd_v[i].skb); /* For each descriptor, we programmed cntrl with the (non-zero) * descriptor size, after it had been successfully allocated. * So a non-zero value in there means we need to unmap it. */ if (lp->rx_bd_v[i].cntrl) { phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); dma_unmap_single(lp->dev, phys, lp->max_frm_size, DMA_FROM_DEVICE); } } dma_free_coherent(lp->dev, sizeof(*lp->rx_bd_v) * lp->rx_bd_num, lp->rx_bd_v, lp->rx_bd_p); } /** * axienet_usec_to_timer - Calculate IRQ delay timer value * @lp: Pointer to the axienet_local structure * @coalesce_usec: Microseconds to convert into timer value */ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec) { u32 result; u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */ if (lp->axi_clk) clk_rate = clk_get_rate(lp->axi_clk); /* 1 Timeout Interval = 125 * (clock period of SG clock) */ result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate, (u64)125000000); if (result > 255) result = 255; return result; } /** * axienet_dma_start - Set up DMA registers and start DMA operation * @lp: Pointer to the axienet_local structure */ static void axienet_dma_start(struct axienet_local *lp) { /* Start updating the Rx channel control register */ lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) | XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; /* Only set interrupt delay timer if not generating an interrupt on * the first RX packet. Otherwise leave at 0 to disable delay interrupt. */ if (lp->coalesce_count_rx > 1) lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx) << XAXIDMA_DELAY_SHIFT) | XAXIDMA_IRQ_DELAY_MASK; axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); /* Start updating the Tx channel control register */ lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) | XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; /* Only set interrupt delay timer if not generating an interrupt on * the first TX packet. 
Otherwise leave at 0 to disable delay interrupt. */ if (lp->coalesce_count_tx > 1) lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx) << XAXIDMA_DELAY_SHIFT) | XAXIDMA_IRQ_DELAY_MASK; axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. */ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. But only after we write to the * tail pointer register that the Tx channel will start transmitting. */ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); } /** * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA * @ndev: Pointer to the net_device structure * * Return: 0, on success -ENOMEM, on failure * * This function is called to initialize the Rx and Tx DMA descriptor * rings. This initializes the descriptors with required default values * and is called when Axi Ethernet driver reset is called. */ static int axienet_dma_bd_init(struct net_device *ndev) { int i; struct sk_buff *skb; struct axienet_local *lp = netdev_priv(ndev); /* Reset the indexes which are used for accessing the BDs */ lp->tx_bd_ci = 0; lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; /* Allocate the Tx and Rx buffer descriptors. */ lp->tx_bd_v = dma_alloc_coherent(lp->dev, sizeof(*lp->tx_bd_v) * lp->tx_bd_num, &lp->tx_bd_p, GFP_KERNEL); if (!lp->tx_bd_v) return -ENOMEM; lp->rx_bd_v = dma_alloc_coherent(lp->dev, sizeof(*lp->rx_bd_v) * lp->rx_bd_num, &lp->rx_bd_p, GFP_KERNEL); if (!lp->rx_bd_v) goto out; for (i = 0; i < lp->tx_bd_num; i++) { dma_addr_t addr = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num); lp->tx_bd_v[i].next = lower_32_bits(addr); if (lp->features & XAE_FEATURE_DMA_64BIT) lp->tx_bd_v[i].next_msb = upper_32_bits(addr); } for (i = 0; i < lp->rx_bd_num; i++) { dma_addr_t addr; addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num); lp->rx_bd_v[i].next = lower_32_bits(addr); if (lp->features & XAE_FEATURE_DMA_64BIT) lp->rx_bd_v[i].next_msb = upper_32_bits(addr); skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); if (!skb) goto out; lp->rx_bd_v[i].skb = skb; addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE); if (dma_mapping_error(lp->dev, addr)) { netdev_err(ndev, "DMA mapping error\n"); goto out; } desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]); lp->rx_bd_v[i].cntrl = lp->max_frm_size; } axienet_dma_start(lp); return 0; out: axienet_dma_bd_release(ndev); return -ENOMEM; } /** * axienet_set_mac_address - Write the MAC address * @ndev: Pointer to the net_device structure * @address: 6 byte Address to be written as MAC address * * This function is called to initialize the MAC address of the Axi Ethernet * core. It writes to the UAW0 and UAW1 registers of the core. 
*/ static void axienet_set_mac_address(struct net_device *ndev, const void *address) { struct axienet_local *lp = netdev_priv(ndev); if (address) eth_hw_addr_set(ndev, address); if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); /* Set up unicast MAC address filter set its mac address */ axienet_iow(lp, XAE_UAW0_OFFSET, (ndev->dev_addr[0]) | (ndev->dev_addr[1] << 8) | (ndev->dev_addr[2] << 16) | (ndev->dev_addr[3] << 24)); axienet_iow(lp, XAE_UAW1_OFFSET, (((axienet_ior(lp, XAE_UAW1_OFFSET)) & ~XAE_UAW1_UNICASTADDR_MASK) | (ndev->dev_addr[4] | (ndev->dev_addr[5] << 8)))); } /** * netdev_set_mac_address - Write the MAC address (from outside the driver) * @ndev: Pointer to the net_device structure * @p: 6 byte Address to be written as MAC address * * Return: 0 for all conditions. Presently, there is no failure case. * * This function is called to initialize the MAC address of the Axi Ethernet * core. It calls the core specific axienet_set_mac_address. This is the * function that goes into net_device_ops structure entry ndo_set_mac_address. */ static int netdev_set_mac_address(struct net_device *ndev, void *p) { struct sockaddr *addr = p; axienet_set_mac_address(ndev, addr->sa_data); return 0; } /** * axienet_set_multicast_list - Prepare the multicast table * @ndev: Pointer to the net_device structure * * This function is called to initialize the multicast table during * initialization. The Axi Ethernet basic multicast support has a four-entry * multicast table which is initialized here. Additionally this function * goes into the net_device_ops structure entry ndo_set_multicast_list. This * means whenever the multicast table entries need to be updated this * function gets called. */ static void axienet_set_multicast_list(struct net_device *ndev) { int i; u32 reg, af0reg, af1reg; struct axienet_local *lp = netdev_priv(ndev); if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { /* We must make the kernel realize we had to move into * promiscuous mode. If it was a promiscuous mode request * the flag is already set. If not we set it. */ ndev->flags |= IFF_PROMISC; reg = axienet_ior(lp, XAE_FMI_OFFSET); reg |= XAE_FMI_PM_MASK; axienet_iow(lp, XAE_FMI_OFFSET, reg); dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); } else if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; i = 0; netdev_for_each_mc_addr(ha, ndev) { if (i >= XAE_MULTICAST_CAM_TABLE_NUM) break; af0reg = (ha->addr[0]); af0reg |= (ha->addr[1] << 8); af0reg |= (ha->addr[2] << 16); af0reg |= (ha->addr[3] << 24); af1reg = (ha->addr[4]); af1reg |= (ha->addr[5] << 8); reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; reg |= i; axienet_iow(lp, XAE_FMI_OFFSET, reg); axienet_iow(lp, XAE_AF0_OFFSET, af0reg); axienet_iow(lp, XAE_AF1_OFFSET, af1reg); i++; } } else { reg = axienet_ior(lp, XAE_FMI_OFFSET); reg &= ~XAE_FMI_PM_MASK; axienet_iow(lp, XAE_FMI_OFFSET, reg); for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; reg |= i; axienet_iow(lp, XAE_FMI_OFFSET, reg); axienet_iow(lp, XAE_AF0_OFFSET, 0); axienet_iow(lp, XAE_AF1_OFFSET, 0); } dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); } } /** * axienet_setoptions - Set an Axi Ethernet option * @ndev: Pointer to the net_device structure * @options: Option to be enabled/disabled * * The Axi Ethernet core has multiple features which can be selectively turned * on or off. 
The typical options could be jumbo frame option, basic VLAN * option, promiscuous mode option etc. This function is used to set or clear * these options in the Axi Ethernet hardware. This is done through * axienet_option structure . */ static void axienet_setoptions(struct net_device *ndev, u32 options) { int reg; struct axienet_local *lp = netdev_priv(ndev); struct axienet_option *tp = &axienet_options[0]; while (tp->opt) { reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); if (options & tp->opt) reg |= tp->m_or; axienet_iow(lp, tp->reg, reg); tp++; } lp->options |= options; } static int __axienet_device_reset(struct axienet_local *lp) { u32 value; int ret; /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset * process of Axi DMA takes a while to complete as all pending * commands/transfers will be flushed or completed during this * reset process. * Note that even though both TX and RX have their own reset register, * they both reset the entire DMA core, so only one needs to be used. */ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); ret = read_poll_timeout(axienet_dma_in32, value, !(value & XAXIDMA_CR_RESET_MASK), DELAY_OF_ONE_MILLISEC, 50000, false, lp, XAXIDMA_TX_CR_OFFSET); if (ret) { dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); return ret; } /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ ret = read_poll_timeout(axienet_ior, value, value & XAE_INT_PHYRSTCMPLT_MASK, DELAY_OF_ONE_MILLISEC, 50000, false, lp, XAE_IS_OFFSET); if (ret) { dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); return ret; } return 0; } /** * axienet_dma_stop - Stop DMA operation * @lp: Pointer to the axienet_local structure */ static void axienet_dma_stop(struct axienet_local *lp) { int count; u32 cr, sr; cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); synchronize_irq(lp->rx_irq); cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); synchronize_irq(lp->tx_irq); /* Give DMAs a chance to halt gracefully */ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { msleep(20); sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); } sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { msleep(20); sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); } /* Do a reset to ensure DMA is really stopped */ axienet_lock_mii(lp); __axienet_device_reset(lp); axienet_unlock_mii(lp); } /** * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. * @ndev: Pointer to the net_device structure * * This function is called to reset and initialize the Axi Ethernet core. This * is typically called during initialization. It does a reset of the Axi DMA * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines * are connected to Axi Ethernet reset lines, this in turn resets the Axi * Ethernet core. No separate hardware reset is done for the Axi Ethernet * core. * Returns 0 on success or a negative error number otherwise. 
*/ static int axienet_device_reset(struct net_device *ndev) { u32 axienet_status; struct axienet_local *lp = netdev_priv(ndev); int ret; ret = __axienet_device_reset(lp); if (ret) return ret; lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; lp->options |= XAE_OPTION_VLAN; lp->options &= (~XAE_OPTION_JUMBO); if ((ndev->mtu > XAE_MTU) && (ndev->mtu <= XAE_JUMBO_MTU)) { lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE; if (lp->max_frm_size <= lp->rxmem) lp->options |= XAE_OPTION_JUMBO; } ret = axienet_dma_bd_init(ndev); if (ret) { netdev_err(ndev, "%s: descriptor allocation failed\n", __func__); return ret; } axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); axienet_status &= ~XAE_RCW1_RX_MASK; axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); axienet_status = axienet_ior(lp, XAE_IP_OFFSET); if (axienet_status & XAE_INT_RXRJECT_MASK) axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? XAE_INT_RECV_ERROR_MASK : 0); axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); /* Sync default options with HW but leave receiver and * transmitter disabled. */ axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); axienet_set_mac_address(ndev, NULL); axienet_set_multicast_list(ndev); axienet_setoptions(ndev, lp->options); netif_trans_update(ndev); return 0; } /** * axienet_free_tx_chain - Clean up a series of linked TX descriptors. * @lp: Pointer to the axienet_local structure * @first_bd: Index of first descriptor to clean up * @nr_bds: Max number of descriptors to clean up * @force: Whether to clean descriptors even if not complete * @sizep: Pointer to a u32 filled with the total sum of all bytes * in all cleaned-up descriptors. Ignored if NULL. * @budget: NAPI budget (use 0 when not called from NAPI poll) * * Would either be called after a successful transmit operation, or after * there was an error when setting up the chain. * Returns the number of descriptors handled. */ static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, int nr_bds, bool force, u32 *sizep, int budget) { struct axidma_bd *cur_p; unsigned int status; dma_addr_t phys; int i; for (i = 0; i < nr_bds; i++) { cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; status = cur_p->status; /* If force is not specified, clean up only descriptors * that have been completed by the MAC. */ if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) break; /* Ensure we see complete descriptor update */ dma_rmb(); phys = desc_get_phys_addr(lp, cur_p); dma_unmap_single(lp->dev, phys, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE); if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) napi_consume_skb(cur_p->skb, budget); cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app4 = 0; cur_p->skb = NULL; /* ensure our transmit path and device don't prematurely see status cleared */ wmb(); cur_p->cntrl = 0; cur_p->status = 0; if (sizep) *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; } return i; } /** * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy * @lp: Pointer to the axienet_local structure * @num_frag: The number of BDs to check for * * Return: 0, on success * NETDEV_TX_BUSY, if any of the descriptors are not free * * This function is invoked before BDs are allocated and transmission starts. * This function returns 0 if a BD or group of BDs can be allocated for * transmission. If the BD or any of the BDs are not free the function * returns a busy status. 
*/ static inline int axienet_check_tx_bd_space(struct axienet_local *lp, int num_frag) { struct axidma_bd *cur_p; /* Ensure we see all descriptor updates from device or TX polling */ rmb(); cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % lp->tx_bd_num]; if (cur_p->cntrl) return NETDEV_TX_BUSY; return 0; } /** * axienet_tx_poll - Invoked once a transmit is completed by the * Axi DMA Tx channel. * @napi: Pointer to NAPI structure. * @budget: Max number of TX packets to process. * * Return: Number of TX packets processed. * * This function is invoked from the NAPI processing to notify the completion * of transmit operation. It clears fields in the corresponding Tx BDs and * unmaps the corresponding buffer so that CPU can regain ownership of the * buffer. It finally invokes "netif_wake_queue" to restart transmission if * required. */ static int axienet_tx_poll(struct napi_struct *napi, int budget) { struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx); struct net_device *ndev = lp->ndev; u32 size = 0; int packets; packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget); if (packets) { lp->tx_bd_ci += packets; if (lp->tx_bd_ci >= lp->tx_bd_num) lp->tx_bd_ci %= lp->tx_bd_num; u64_stats_update_begin(&lp->tx_stat_sync); u64_stats_add(&lp->tx_packets, packets); u64_stats_add(&lp->tx_bytes, size); u64_stats_update_end(&lp->tx_stat_sync); /* Matches barrier in axienet_start_xmit */ smp_mb(); if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) netif_wake_queue(ndev); } if (packets < budget && napi_complete_done(napi, packets)) { /* Re-enable TX completion interrupts. This should * cause an immediate interrupt if any TX packets are * already pending. */ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); } return packets; } /** * axienet_start_xmit - Starts the transmission. * @skb: sk_buff pointer that contains data to be Txed. * @ndev: Pointer to net_device structure. * * Return: NETDEV_TX_OK, on success * NETDEV_TX_BUSY, if any of the descriptors are not free * * This function is invoked from upper layers to initiate transmission. The * function uses the next available free BDs and populates their fields to * start the transmission. Additionally if checksum offloading is supported, * it populates AXI Stream Control fields with appropriate values. */ static netdev_tx_t axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { u32 ii; u32 num_frag; u32 csum_start_off; u32 csum_index_off; skb_frag_t *frag; dma_addr_t tail_p, phys; u32 orig_tail_ptr, new_tail_ptr; struct axienet_local *lp = netdev_priv(ndev); struct axidma_bd *cur_p; orig_tail_ptr = lp->tx_bd_tail; new_tail_ptr = orig_tail_ptr; num_frag = skb_shinfo(skb)->nr_frags; cur_p = &lp->tx_bd_v[orig_tail_ptr]; if (axienet_check_tx_bd_space(lp, num_frag + 1)) { /* Should not happen as last start_xmit call should have * checked for sufficient space and queue should only be * woken when sufficient space is available. 
*/ netif_stop_queue(ndev); if (net_ratelimit()) netdev_warn(ndev, "TX ring unexpectedly full\n"); return NETDEV_TX_BUSY; } if (skb->ip_summed == CHECKSUM_PARTIAL) { if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { /* Tx Full Checksum Offload Enabled */ cur_p->app0 |= 2; } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { csum_start_off = skb_transport_offset(skb); csum_index_off = csum_start_off + skb->csum_offset; /* Tx Partial Checksum Offload Enabled */ cur_p->app0 |= 1; cur_p->app1 = (csum_start_off << 16) | csum_index_off; } } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ } phys = dma_map_single(lp->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(lp->dev, phys))) { if (net_ratelimit()) netdev_err(ndev, "TX DMA mapping error\n"); ndev->stats.tx_dropped++; return NETDEV_TX_OK; } desc_set_phys_addr(lp, phys, cur_p); cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; for (ii = 0; ii < num_frag; ii++) { if (++new_tail_ptr >= lp->tx_bd_num) new_tail_ptr = 0; cur_p = &lp->tx_bd_v[new_tail_ptr]; frag = &skb_shinfo(skb)->frags[ii]; phys = dma_map_single(lp->dev, skb_frag_address(frag), skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(lp->dev, phys))) { if (net_ratelimit()) netdev_err(ndev, "TX DMA mapping error\n"); ndev->stats.tx_dropped++; axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1, true, NULL, 0); return NETDEV_TX_OK; } desc_set_phys_addr(lp, phys, cur_p); cur_p->cntrl = skb_frag_size(frag); } cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; cur_p->skb = skb; tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr; if (++new_tail_ptr >= lp->tx_bd_num) new_tail_ptr = 0; WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); /* Start the transfer */ axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); /* Stop queue if next transmit may not have space */ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { netif_stop_queue(ndev); /* Matches barrier in axienet_tx_poll */ smp_mb(); /* Space might have just been freed - check again */ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) netif_wake_queue(ndev); } return NETDEV_TX_OK; } /** * axienet_rx_poll - Triggered by RX ISR to complete the BD processing. * @napi: Pointer to NAPI structure. * @budget: Max number of RX packets to process. * * Return: Number of RX packets processed. */ static int axienet_rx_poll(struct napi_struct *napi, int budget) { u32 length; u32 csumstatus; u32 size = 0; int packets = 0; dma_addr_t tail_p = 0; struct axidma_bd *cur_p; struct sk_buff *skb, *new_skb; struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx); cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { dma_addr_t phys; /* Ensure we see complete descriptor update */ dma_rmb(); skb = cur_p->skb; cur_p->skb = NULL; /* skb could be NULL if a previous pass already received the * packet for this slot in the ring, but failed to refill it * with a newly allocated buffer. In this case, don't try to * receive it again. 
*/ if (likely(skb)) { length = cur_p->app4 & 0x0000FFFF; phys = desc_get_phys_addr(lp, cur_p); dma_unmap_single(lp->dev, phys, lp->max_frm_size, DMA_FROM_DEVICE); skb_put(skb, length); skb->protocol = eth_type_trans(skb, lp->ndev); /*skb_checksum_none_assert(skb);*/ skb->ip_summed = CHECKSUM_NONE; /* if we're doing Rx csum offload, set it up */ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { csumstatus = (cur_p->app2 & XAE_FULL_CSUM_STATUS_MASK) >> 3; if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { skb->ip_summed = CHECKSUM_UNNECESSARY; } } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && skb->protocol == htons(ETH_P_IP) && skb->len > 64) { skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); skb->ip_summed = CHECKSUM_COMPLETE; } napi_gro_receive(napi, skb); size += length; packets++; } new_skb = napi_alloc_skb(napi, lp->max_frm_size); if (!new_skb) break; phys = dma_map_single(lp->dev, new_skb->data, lp->max_frm_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(lp->dev, phys))) { if (net_ratelimit()) netdev_err(lp->ndev, "RX DMA mapping error\n"); dev_kfree_skb(new_skb); break; } desc_set_phys_addr(lp, phys, cur_p); cur_p->cntrl = lp->max_frm_size; cur_p->status = 0; cur_p->skb = new_skb; /* Only update tail_p to mark this slot as usable after it has * been successfully refilled. */ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; if (++lp->rx_bd_ci >= lp->rx_bd_num) lp->rx_bd_ci = 0; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; } u64_stats_update_begin(&lp->rx_stat_sync); u64_stats_add(&lp->rx_packets, packets); u64_stats_add(&lp->rx_bytes, size); u64_stats_update_end(&lp->rx_stat_sync); if (tail_p) axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); if (packets < budget && napi_complete_done(napi, packets)) { /* Re-enable RX completion interrupts. This should * cause an immediate interrupt if any RX packets are * already pending. */ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); } return packets; } /** * axienet_tx_irq - Tx Done Isr. * @irq: irq number * @_ndev: net_device pointer * * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. * * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the * TX BD processing. */ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) { unsigned int status; struct net_device *ndev = _ndev; struct axienet_local *lp = netdev_priv(ndev); status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); if (!(status & XAXIDMA_IRQ_ALL_MASK)) return IRQ_NONE; axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { netdev_err(ndev, "DMA Tx error 0x%x\n", status); netdev_err(ndev, "Current BD is at: 0x%x%08x\n", (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, (lp->tx_bd_v[lp->tx_bd_ci]).phys); schedule_work(&lp->dma_err_task); } else { /* Disable further TX completion interrupts and schedule * NAPI to handle the completions. */ u32 cr = lp->tx_dma_cr; cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); napi_schedule(&lp->napi_tx); } return IRQ_HANDLED; } /** * axienet_rx_irq - Rx Isr. * @irq: irq number * @_ndev: net_device pointer * * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise. * * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD * processing. 
*/ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) { unsigned int status; struct net_device *ndev = _ndev; struct axienet_local *lp = netdev_priv(ndev); status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); if (!(status & XAXIDMA_IRQ_ALL_MASK)) return IRQ_NONE; axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { netdev_err(ndev, "DMA Rx error 0x%x\n", status); netdev_err(ndev, "Current BD is at: 0x%x%08x\n", (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, (lp->rx_bd_v[lp->rx_bd_ci]).phys); schedule_work(&lp->dma_err_task); } else { /* Disable further RX completion interrupts and schedule * NAPI receive. */ u32 cr = lp->rx_dma_cr; cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); napi_schedule(&lp->napi_rx); } return IRQ_HANDLED; } /** * axienet_eth_irq - Ethernet core Isr. * @irq: irq number * @_ndev: net_device pointer * * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise. * * Handle miscellaneous conditions indicated by Ethernet core IRQ. */ static irqreturn_t axienet_eth_irq(int irq, void *_ndev) { struct net_device *ndev = _ndev; struct axienet_local *lp = netdev_priv(ndev); unsigned int pending; pending = axienet_ior(lp, XAE_IP_OFFSET); if (!pending) return IRQ_NONE; if (pending & XAE_INT_RXFIFOOVR_MASK) ndev->stats.rx_missed_errors++; if (pending & XAE_INT_RXRJECT_MASK) ndev->stats.rx_frame_errors++; axienet_iow(lp, XAE_IS_OFFSET, pending); return IRQ_HANDLED; } static void axienet_dma_err_handler(struct work_struct *work); /** * axienet_open - Driver open routine. * @ndev: Pointer to net_device structure * * Return: 0, on success. * non-zero error value on failure * * This is the driver open routine. It calls phylink_start to start the * PHY device. * It also allocates interrupt service routines, enables the interrupt lines * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer * descriptors are initialized. */ static int axienet_open(struct net_device *ndev) { int ret; struct axienet_local *lp = netdev_priv(ndev); dev_dbg(&ndev->dev, "axienet_open()\n"); /* When we do an Axi Ethernet reset, it resets the complete core * including the MDIO. MDIO must be disabled before resetting. * Hold MDIO bus lock to avoid MDIO accesses during the reset. 
*/ axienet_lock_mii(lp); ret = axienet_device_reset(ndev); axienet_unlock_mii(lp); ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); if (ret) { dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); return ret; } phylink_start(lp->phylink); /* Enable worker thread for Axi DMA error handling */ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); napi_enable(&lp->napi_rx); napi_enable(&lp->napi_tx); /* Enable interrupts for Axi DMA Tx */ ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, ndev->name, ndev); if (ret) goto err_tx_irq; /* Enable interrupts for Axi DMA Rx */ ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, ndev->name, ndev); if (ret) goto err_rx_irq; /* Enable interrupts for Axi Ethernet core (if defined) */ if (lp->eth_irq > 0) { ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, ndev->name, ndev); if (ret) goto err_eth_irq; } return 0; err_eth_irq: free_irq(lp->rx_irq, ndev); err_rx_irq: free_irq(lp->tx_irq, ndev); err_tx_irq: napi_disable(&lp->napi_tx); napi_disable(&lp->napi_rx); phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); cancel_work_sync(&lp->dma_err_task); dev_err(lp->dev, "request_irq() failed\n"); return ret; } /** * axienet_stop - Driver stop routine. * @ndev: Pointer to net_device structure * * Return: 0, on success. * * This is the driver stop routine. It calls phylink_disconnect to stop the PHY * device. It also removes the interrupt handlers and disables the interrupts. * The Axi DMA Tx/Rx BDs are released. */ static int axienet_stop(struct net_device *ndev) { struct axienet_local *lp = netdev_priv(ndev); dev_dbg(&ndev->dev, "axienet_close()\n"); napi_disable(&lp->napi_tx); napi_disable(&lp->napi_rx); phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); axienet_dma_stop(lp); axienet_iow(lp, XAE_IE_OFFSET, 0); cancel_work_sync(&lp->dma_err_task); if (lp->eth_irq > 0) free_irq(lp->eth_irq, ndev); free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); axienet_dma_bd_release(ndev); return 0; } /** * axienet_change_mtu - Driver change mtu routine. * @ndev: Pointer to net_device structure * @new_mtu: New mtu value to be applied * * Return: Always returns 0 (success). * * This is the change mtu driver routine. It checks if the Axi Ethernet * hardware supports jumbo frames before changing the mtu. This can be * called only when the device is not up. */ static int axienet_change_mtu(struct net_device *ndev, int new_mtu) { struct axienet_local *lp = netdev_priv(ndev); if (netif_running(ndev)) return -EBUSY; if ((new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE) > lp->rxmem) return -EINVAL; ndev->mtu = new_mtu; return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER /** * axienet_poll_controller - Axi Ethernet poll mechanism. * @ndev: Pointer to net_device structure * * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior * to polling the ISRs and are enabled back after the polling is done. 
*/ static void axienet_poll_controller(struct net_device *ndev) { struct axienet_local *lp = netdev_priv(ndev); disable_irq(lp->tx_irq); disable_irq(lp->rx_irq); axienet_rx_irq(lp->tx_irq, ndev); axienet_tx_irq(lp->rx_irq, ndev); enable_irq(lp->tx_irq); enable_irq(lp->rx_irq); } #endif static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct axienet_local *lp = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; return phylink_mii_ioctl(lp->phylink, rq, cmd); } static void axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct axienet_local *lp = netdev_priv(dev); unsigned int start; netdev_stats_to_stats64(stats, &dev->stats); do { start = u64_stats_fetch_begin(&lp->rx_stat_sync); stats->rx_packets = u64_stats_read(&lp->rx_packets); stats->rx_bytes = u64_stats_read(&lp->rx_bytes); } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start)); do { start = u64_stats_fetch_begin(&lp->tx_stat_sync); stats->tx_packets = u64_stats_read(&lp->tx_packets); stats->tx_bytes = u64_stats_read(&lp->tx_bytes); } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); } static const struct net_device_ops axienet_netdev_ops = { .ndo_open = axienet_open, .ndo_stop = axienet_stop, .ndo_start_xmit = axienet_start_xmit, .ndo_get_stats64 = axienet_get_stats64, .ndo_change_mtu = axienet_change_mtu, .ndo_set_mac_address = netdev_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = axienet_ioctl, .ndo_set_rx_mode = axienet_set_multicast_list, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = axienet_poll_controller, #endif }; /** * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. * @ndev: Pointer to net_device structure * @ed: Pointer to ethtool_drvinfo structure * * This implements ethtool command for getting the driver information. * Issue "ethtool -i ethX" under linux prompt to execute this function. */ static void axienet_ethtools_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) { strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); } /** * axienet_ethtools_get_regs_len - Get the total regs length present in the * AxiEthernet core. * @ndev: Pointer to net_device structure * * This implements ethtool command for getting the total register length * information. * * Return: the total regs length */ static int axienet_ethtools_get_regs_len(struct net_device *ndev) { return sizeof(u32) * AXIENET_REGS_N; } /** * axienet_ethtools_get_regs - Dump the contents of all registers present * in AxiEthernet core. * @ndev: Pointer to net_device structure * @regs: Pointer to ethtool_regs structure * @ret: Void pointer used to return the contents of the registers. * * This implements ethtool command for getting the Axi Ethernet register dump. * Issue "ethtool -d ethX" to execute this function. 
*/ static void axienet_ethtools_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *ret) { u32 *data = (u32 *)ret; size_t len = sizeof(u32) * AXIENET_REGS_N; struct axienet_local *lp = netdev_priv(ndev); regs->version = 0; regs->len = len; memset(data, 0, len); data[0] = axienet_ior(lp, XAE_RAF_OFFSET); data[1] = axienet_ior(lp, XAE_TPF_OFFSET); data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); data[3] = axienet_ior(lp, XAE_IS_OFFSET); data[4] = axienet_ior(lp, XAE_IP_OFFSET); data[5] = axienet_ior(lp, XAE_IE_OFFSET); data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); data[12] = axienet_ior(lp, XAE_PPST_OFFSET); data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); data[15] = axienet_ior(lp, XAE_TC_OFFSET); data[16] = axienet_ior(lp, XAE_FCC_OFFSET); data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); data[29] = axienet_ior(lp, XAE_FMI_OFFSET); data[30] = axienet_ior(lp, XAE_AF0_OFFSET); data[31] = axienet_ior(lp, XAE_AF1_OFFSET); data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); } static void axienet_ethtools_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ering, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); ering->rx_max_pending = RX_BD_NUM_MAX; ering->rx_mini_max_pending = 0; ering->rx_jumbo_max_pending = 0; ering->tx_max_pending = TX_BD_NUM_MAX; ering->rx_pending = lp->rx_bd_num; ering->rx_mini_pending = 0; ering->rx_jumbo_pending = 0; ering->tx_pending = lp->tx_bd_num; } static int axienet_ethtools_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *ering, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); if (ering->rx_pending > RX_BD_NUM_MAX || ering->rx_mini_pending || ering->rx_jumbo_pending || ering->tx_pending < TX_BD_NUM_MIN || ering->tx_pending > TX_BD_NUM_MAX) return -EINVAL; if (netif_running(ndev)) return -EBUSY; lp->rx_bd_num = ering->rx_pending; lp->tx_bd_num = ering->tx_pending; return 0; } /** * axienet_ethtools_get_pauseparam - Get the pause parameter setting for * Tx and Rx paths. * @ndev: Pointer to net_device structure * @epauseparm: Pointer to ethtool_pauseparam structure. * * This implements ethtool command for getting axi ethernet pause frame * setting. Issue "ethtool -a ethX" to execute this function. 
*/ static void axienet_ethtools_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *epauseparm) { struct axienet_local *lp = netdev_priv(ndev); phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); } /** * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) * settings. * @ndev: Pointer to net_device structure * @epauseparm:Pointer to ethtool_pauseparam structure * * This implements ethtool command for enabling flow control on Rx and Tx * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this * function. * * Return: 0 on success, -EFAULT if device is running */ static int axienet_ethtools_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *epauseparm) { struct axienet_local *lp = netdev_priv(ndev); return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); } /** * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. * @ndev: Pointer to net_device structure * @ecoalesce: Pointer to ethtool_coalesce structure * @kernel_coal: ethtool CQE mode setting structure * @extack: extack for reporting error messages * * This implements ethtool command for getting the DMA interrupt coalescing * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to * execute this function. * * Return: 0 always */ static int axienet_ethtools_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ecoalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx; ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx; ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx; ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx; return 0; } /** * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. * @ndev: Pointer to net_device structure * @ecoalesce: Pointer to ethtool_coalesce structure * @kernel_coal: ethtool CQE mode setting structure * @extack: extack for reporting error messages * * This implements ethtool command for setting the DMA interrupt coalescing * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux * prompt to execute this function. * * Return: 0, on success, Non-zero error value on failure. 
*/ static int axienet_ethtools_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ecoalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); if (netif_running(ndev)) { netdev_err(ndev, "Please stop netif before applying configuration\n"); return -EFAULT; } if (ecoalesce->rx_max_coalesced_frames) lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; if (ecoalesce->rx_coalesce_usecs) lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs; if (ecoalesce->tx_max_coalesced_frames) lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; if (ecoalesce->tx_coalesce_usecs) lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs; return 0; } static int axienet_ethtools_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) { struct axienet_local *lp = netdev_priv(ndev); return phylink_ethtool_ksettings_get(lp->phylink, cmd); } static int axienet_ethtools_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd) { struct axienet_local *lp = netdev_priv(ndev); return phylink_ethtool_ksettings_set(lp->phylink, cmd); } static int axienet_ethtools_nway_reset(struct net_device *dev) { struct axienet_local *lp = netdev_priv(dev); return phylink_ethtool_nway_reset(lp->phylink); } static const struct ethtool_ops axienet_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS, .get_drvinfo = axienet_ethtools_get_drvinfo, .get_regs_len = axienet_ethtools_get_regs_len, .get_regs = axienet_ethtools_get_regs, .get_link = ethtool_op_get_link, .get_ringparam = axienet_ethtools_get_ringparam, .set_ringparam = axienet_ethtools_set_ringparam, .get_pauseparam = axienet_ethtools_get_pauseparam, .set_pauseparam = axienet_ethtools_set_pauseparam, .get_coalesce = axienet_ethtools_get_coalesce, .set_coalesce = axienet_ethtools_set_coalesce, .get_link_ksettings = axienet_ethtools_get_link_ksettings, .set_link_ksettings = axienet_ethtools_set_link_ksettings, .nway_reset = axienet_ethtools_nway_reset, }; static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) { return container_of(pcs, struct axienet_local, pcs); } static void axienet_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; phylink_mii_c22_pcs_get_state(pcs_phy, state); } static void axienet_pcs_an_restart(struct phylink_pcs *pcs) { struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; phylink_mii_c22_pcs_an_restart(pcs_phy); } static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, const unsigned long *advertising, bool permit_pause_to_mac) { struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; struct axienet_local *lp = netdev_priv(ndev); int ret; if (lp->switch_x_sgmii) { ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, interface == PHY_INTERFACE_MODE_SGMII ? 
XLNX_MII_STD_SELECT_SGMII : 0); if (ret < 0) { netdev_warn(ndev, "Failed to switch PHY interface: %d\n", ret); return ret; } } ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising, neg_mode); if (ret < 0) netdev_warn(ndev, "Failed to configure PCS: %d\n", ret); return ret; } static const struct phylink_pcs_ops axienet_pcs_ops = { .pcs_get_state = axienet_pcs_get_state, .pcs_config = axienet_pcs_config, .pcs_an_restart = axienet_pcs_an_restart, }; static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { struct net_device *ndev = to_net_dev(config->dev); struct axienet_local *lp = netdev_priv(ndev); if (interface == PHY_INTERFACE_MODE_1000BASEX || interface == PHY_INTERFACE_MODE_SGMII) return &lp->pcs; return NULL; } static void axienet_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { /* nothing meaningful to do */ } static void axienet_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { /* nothing meaningful to do */ } static void axienet_mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { struct net_device *ndev = to_net_dev(config->dev); struct axienet_local *lp = netdev_priv(ndev); u32 emmc_reg, fcc_reg; emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; switch (speed) { case SPEED_1000: emmc_reg |= XAE_EMMC_LINKSPD_1000; break; case SPEED_100: emmc_reg |= XAE_EMMC_LINKSPD_100; break; case SPEED_10: emmc_reg |= XAE_EMMC_LINKSPD_10; break; default: dev_err(&ndev->dev, "Speed other than 10, 100 or 1Gbps is not supported\n"); break; } axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); if (tx_pause) fcc_reg |= XAE_FCC_FCTX_MASK; else fcc_reg &= ~XAE_FCC_FCTX_MASK; if (rx_pause) fcc_reg |= XAE_FCC_FCRX_MASK; else fcc_reg &= ~XAE_FCC_FCRX_MASK; axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); } static const struct phylink_mac_ops axienet_phylink_ops = { .mac_select_pcs = axienet_mac_select_pcs, .mac_config = axienet_mac_config, .mac_link_down = axienet_mac_link_down, .mac_link_up = axienet_mac_link_up, }; /** * axienet_dma_err_handler - Work queue task for Axi DMA Error * @work: pointer to work_struct * * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the * Tx/Rx BDs. 
*/ static void axienet_dma_err_handler(struct work_struct *work) { u32 i; u32 axienet_status; struct axidma_bd *cur_p; struct axienet_local *lp = container_of(work, struct axienet_local, dma_err_task); struct net_device *ndev = lp->ndev; napi_disable(&lp->napi_tx); napi_disable(&lp->napi_rx); axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); axienet_dma_stop(lp); for (i = 0; i < lp->tx_bd_num; i++) { cur_p = &lp->tx_bd_v[i]; if (cur_p->cntrl) { dma_addr_t addr = desc_get_phys_addr(lp, cur_p); dma_unmap_single(lp->dev, addr, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE); } if (cur_p->skb) dev_kfree_skb_irq(cur_p->skb); cur_p->phys = 0; cur_p->phys_msb = 0; cur_p->cntrl = 0; cur_p->status = 0; cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app3 = 0; cur_p->app4 = 0; cur_p->skb = NULL; } for (i = 0; i < lp->rx_bd_num; i++) { cur_p = &lp->rx_bd_v[i]; cur_p->status = 0; cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app3 = 0; cur_p->app4 = 0; } lp->tx_bd_ci = 0; lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; axienet_dma_start(lp); axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); axienet_status &= ~XAE_RCW1_RX_MASK; axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); axienet_status = axienet_ior(lp, XAE_IP_OFFSET); if (axienet_status & XAE_INT_RXRJECT_MASK) axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? XAE_INT_RECV_ERROR_MASK : 0); axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); /* Sync default options with HW but leave receiver and * transmitter disabled. */ axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); axienet_set_mac_address(ndev, NULL); axienet_set_multicast_list(ndev); axienet_setoptions(ndev, lp->options); napi_enable(&lp->napi_rx); napi_enable(&lp->napi_tx); } /** * axienet_probe - Axi Ethernet probe function. * @pdev: Pointer to platform device structure. * * Return: 0, on success * Non-zero error value on failure. * * This is the probe routine for Axi Ethernet driver. This is called before * any other driver routines are invoked. It allocates and sets up the Ethernet * device. Parses through device tree and populates fields of * axienet_local. It registers the Ethernet device. */ static int axienet_probe(struct platform_device *pdev) { int ret; struct device_node *np; struct axienet_local *lp; struct net_device *ndev; struct resource *ethres; u8 mac_addr[ETH_ALEN]; int addr_width = 32; u32 value; ndev = alloc_etherdev(sizeof(*lp)); if (!ndev) return -ENOMEM; platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ ndev->features = NETIF_F_SG; ndev->netdev_ops = &axienet_netdev_ops; ndev->ethtool_ops = &axienet_ethtool_ops; /* MTU range: 64 - 9000 */ ndev->min_mtu = 64; ndev->max_mtu = XAE_JUMBO_MTU; lp = netdev_priv(ndev); lp->ndev = ndev; lp->dev = &pdev->dev; lp->options = XAE_OPTION_DEFAULTS; lp->rx_bd_num = RX_BD_NUM_DEFAULT; lp->tx_bd_num = TX_BD_NUM_DEFAULT; u64_stats_init(&lp->rx_stat_sync); u64_stats_init(&lp->tx_stat_sync); netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); if (!lp->axi_clk) { /* For backward compatibility, if named AXI clock is not present, * treat the first clock specified as the AXI clock. 
*/ lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); } if (IS_ERR(lp->axi_clk)) { ret = PTR_ERR(lp->axi_clk); goto free_netdev; } ret = clk_prepare_enable(lp->axi_clk); if (ret) { dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); goto free_netdev; } lp->misc_clks[0].id = "axis_clk"; lp->misc_clks[1].id = "ref_clk"; lp->misc_clks[2].id = "mgt_clk"; ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); if (ret) goto cleanup_clk; ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); if (ret) goto cleanup_clk; /* Map device registers */ lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres); if (IS_ERR(lp->regs)) { ret = PTR_ERR(lp->regs); goto cleanup_clk; } lp->regs_start = ethres->start; /* Setup checksum offload, but default to off if not specified */ lp->features = 0; ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); if (!ret) { switch (value) { case 1: lp->csum_offload_on_tx_path = XAE_FEATURE_PARTIAL_TX_CSUM; lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; /* Can checksum TCP/UDP over IPv4. */ ndev->features |= NETIF_F_IP_CSUM; break; case 2: lp->csum_offload_on_tx_path = XAE_FEATURE_FULL_TX_CSUM; lp->features |= XAE_FEATURE_FULL_TX_CSUM; /* Can checksum TCP/UDP over IPv4. */ ndev->features |= NETIF_F_IP_CSUM; break; default: lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; } } ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); if (!ret) { switch (value) { case 1: lp->csum_offload_on_rx_path = XAE_FEATURE_PARTIAL_RX_CSUM; lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; break; case 2: lp->csum_offload_on_rx_path = XAE_FEATURE_FULL_RX_CSUM; lp->features |= XAE_FEATURE_FULL_RX_CSUM; break; default: lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; } } /* For supporting jumbo frames, the Axi Ethernet hardware must have * a larger Rx/Tx Memory. Typically, the size must be large so that * we can enable jumbo option and start supporting jumbo frames. * Here we check for memory allocated for Rx/Tx in the hardware from * the device-tree and accordingly set flags. 
*/ of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, "xlnx,switch-x-sgmii"); /* Start with the proprietary, and broken phy_type */ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); if (!ret) { netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); switch (value) { case XAE_PHY_TYPE_MII: lp->phy_mode = PHY_INTERFACE_MODE_MII; break; case XAE_PHY_TYPE_GMII: lp->phy_mode = PHY_INTERFACE_MODE_GMII; break; case XAE_PHY_TYPE_RGMII_2_0: lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; break; case XAE_PHY_TYPE_SGMII: lp->phy_mode = PHY_INTERFACE_MODE_SGMII; break; case XAE_PHY_TYPE_1000BASE_X: lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; break; default: ret = -EINVAL; goto cleanup_clk; } } else { ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); if (ret) goto cleanup_clk; } if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); ret = -EINVAL; goto cleanup_clk; } /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); if (np) { struct resource dmares; ret = of_address_to_resource(np, 0, &dmares); if (ret) { dev_err(&pdev->dev, "unable to get DMA resource\n"); of_node_put(np); goto cleanup_clk; } lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); lp->rx_irq = irq_of_parse_and_map(np, 1); lp->tx_irq = irq_of_parse_and_map(np, 0); of_node_put(np); lp->eth_irq = platform_get_irq_optional(pdev, 0); } else { /* Check for these resources directly on the Ethernet node. */ lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); lp->rx_irq = platform_get_irq(pdev, 1); lp->tx_irq = platform_get_irq(pdev, 0); lp->eth_irq = platform_get_irq_optional(pdev, 2); } if (IS_ERR(lp->dma_regs)) { dev_err(&pdev->dev, "could not map DMA regs\n"); ret = PTR_ERR(lp->dma_regs); goto cleanup_clk; } if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { dev_err(&pdev->dev, "could not determine irqs\n"); ret = -ENOMEM; goto cleanup_clk; } /* Reset core now that clocks are enabled, prior to accessing MDIO */ ret = __axienet_device_reset(lp); if (ret) goto cleanup_clk; /* Autodetect the need for 64-bit DMA pointers. * When the IP is configured for a bus width bigger than 32 bits, * writing the MSB registers is mandatory, even if they are all 0. * We can detect this case by writing all 1's to one such register * and see if that sticks: when the IP is configured for 32 bits * only, those registers are RES0. * Those MSB registers were introduced in IP v7.1, which we check first. 
*/ if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; iowrite32(0x0, desc); if (ioread32(desc) == 0) { /* sanity check */ iowrite32(0xffffffff, desc); if (ioread32(desc) > 0) { lp->features |= XAE_FEATURE_DMA_64BIT; addr_width = 64; dev_info(&pdev->dev, "autodetected 64-bit DMA range\n"); } iowrite32(0x0, desc); } } if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n"); ret = -EINVAL; goto cleanup_clk; } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto cleanup_clk; } /* Check for Ethernet core IRQ (optional) */ if (lp->eth_irq <= 0) dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); /* Retrieve the MAC address */ ret = of_get_mac_address(pdev->dev.of_node, mac_addr); if (!ret) { axienet_set_mac_address(ndev, mac_addr); } else { dev_warn(&pdev->dev, "could not find MAC address property: %d\n", ret); axienet_set_mac_address(ndev, NULL); } lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; ret = axienet_mdio_setup(lp); if (ret) dev_warn(&pdev->dev, "error registering MDIO bus: %d\n", ret); if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); if (!np) { /* Deprecated: Always use "pcs-handle" for pcs_phy. * Falling back to "phy-handle" here is only for * backward compatibility with old device trees. */ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); } if (!np) { dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); ret = -EINVAL; goto cleanup_mdio; } lp->pcs_phy = of_mdio_find_device(np); if (!lp->pcs_phy) { ret = -EPROBE_DEFER; of_node_put(np); goto cleanup_mdio; } of_node_put(np); lp->pcs.ops = &axienet_pcs_ops; lp->pcs.neg_mode = true; lp->pcs.poll = true; } lp->phylink_config.dev = &ndev->dev; lp->phylink_config.type = PHYLINK_NETDEV; lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10FD | MAC_100FD | MAC_1000FD; __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); if (lp->switch_x_sgmii) { __set_bit(PHY_INTERFACE_MODE_1000BASEX, lp->phylink_config.supported_interfaces); __set_bit(PHY_INTERFACE_MODE_SGMII, lp->phylink_config.supported_interfaces); } lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, lp->phy_mode, &axienet_phylink_ops); if (IS_ERR(lp->phylink)) { ret = PTR_ERR(lp->phylink); dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); goto cleanup_mdio; } ret = register_netdev(lp->ndev); if (ret) { dev_err(lp->dev, "register_netdev() error (%i)\n", ret); goto cleanup_phylink; } return 0; cleanup_phylink: phylink_destroy(lp->phylink); cleanup_mdio: if (lp->pcs_phy) put_device(&lp->pcs_phy->dev); if (lp->mii_bus) axienet_mdio_teardown(lp); cleanup_clk: clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); clk_disable_unprepare(lp->axi_clk); free_netdev: free_netdev(ndev); return ret; } static int axienet_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct axienet_local *lp = netdev_priv(ndev); unregister_netdev(ndev); if (lp->phylink) phylink_destroy(lp->phylink); if (lp->pcs_phy) put_device(&lp->pcs_phy->dev); 
axienet_mdio_teardown(lp); clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); clk_disable_unprepare(lp->axi_clk); free_netdev(ndev); return 0; } static void axienet_shutdown(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); rtnl_lock(); netif_device_detach(ndev); if (netif_running(ndev)) dev_close(ndev); rtnl_unlock(); } static int axienet_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); if (!netif_running(ndev)) return 0; netif_device_detach(ndev); rtnl_lock(); axienet_stop(ndev); rtnl_unlock(); return 0; } static int axienet_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); if (!netif_running(ndev)) return 0; rtnl_lock(); axienet_open(ndev); rtnl_unlock(); netif_device_attach(ndev); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, axienet_suspend, axienet_resume); static struct platform_driver axienet_driver = { .probe = axienet_probe, .remove = axienet_remove, .shutdown = axienet_shutdown, .driver = { .name = "xilinx_axienet", .pm = &axienet_pm_ops, .of_match_table = axienet_of_match, }, }; module_platform_driver(axienet_driver); MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); MODULE_AUTHOR("Xilinx"); MODULE_LICENSE("GPL");
linux-master
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
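The probe path in xilinx_axienet_main.c above autodetects 64-bit DMA capability by writing all ones to the MSB half of the TX current-descriptor register and checking whether the value sticks; on IP configured for 32-bit addressing that register is RES0 and reads back as zero. The short user-space sketch below is a hypothetical illustration of that write/read-back width probe against an emulated register. fake_dma_regs and probe_addr_width are invented names, and the struct stands in for real MMIO, so this is not driver code.

/* Hypothetical user-space sketch (not driver code): emulates the MSB-register
 * probe used by the Xilinx probe path. Compile with: gcc -Wall probe.c
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct fake_dma_regs {
	bool msb_implemented;   /* true when the IP was built for >32-bit addressing */
	uint32_t tx_cdesc_msb;  /* backing store for the MSB half of TX_CDESC */
};

static void reg_write(struct fake_dma_regs *r, uint32_t val)
{
	/* On a 32-bit-only build the MSB register is RES0: writes are ignored. */
	if (r->msb_implemented)
		r->tx_cdesc_msb = val;
}

static uint32_t reg_read(const struct fake_dma_regs *r)
{
	return r->msb_implemented ? r->tx_cdesc_msb : 0;
}

/* Mirror of the detection logic: write all ones and see whether it sticks. */
static int probe_addr_width(struct fake_dma_regs *r)
{
	int width = 32;

	reg_write(r, 0x0);
	if (reg_read(r) == 0) {		/* sanity check */
		reg_write(r, 0xffffffff);
		if (reg_read(r) > 0)
			width = 64;	/* value stuck: wider address bus */
		reg_write(r, 0x0);	/* leave the register clean */
	}
	return width;
}

int main(void)
{
	struct fake_dma_regs narrow = { .msb_implemented = false };
	struct fake_dma_regs wide = { .msb_implemented = true };

	printf("32-bit build detected as %d-bit DMA\n", probe_addr_width(&narrow));
	printf("64-bit build detected as %d-bit DMA\n", probe_addr_width(&wide));
	return 0;
}

Clearing the register again after the probe mirrors the driver, which later reuses the same location as a live descriptor pointer.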
// SPDX-License-Identifier: GPL-2.0 /* * sni_ave.c - Socionext UniPhier AVE ethernet driver * Copyright 2014 Panasonic Corporation * Copyright 2015-2017 Socionext Inc. */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/mfd/syscon.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_net.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/types.h> #include <linux/u64_stats_sync.h> /* General Register Group */ #define AVE_IDR 0x000 /* ID */ #define AVE_VR 0x004 /* Version */ #define AVE_GRR 0x008 /* Global Reset */ #define AVE_CFGR 0x00c /* Configuration */ /* Interrupt Register Group */ #define AVE_GIMR 0x100 /* Global Interrupt Mask */ #define AVE_GISR 0x104 /* Global Interrupt Status */ /* MAC Register Group */ #define AVE_TXCR 0x200 /* TX Setup */ #define AVE_RXCR 0x204 /* RX Setup */ #define AVE_RXMAC1R 0x208 /* MAC address (lower) */ #define AVE_RXMAC2R 0x20c /* MAC address (upper) */ #define AVE_MDIOCTR 0x214 /* MDIO Control */ #define AVE_MDIOAR 0x218 /* MDIO Address */ #define AVE_MDIOWDR 0x21c /* MDIO Data */ #define AVE_MDIOSR 0x220 /* MDIO Status */ #define AVE_MDIORDR 0x224 /* MDIO Rd Data */ /* Descriptor Control Register Group */ #define AVE_DESCC 0x300 /* Descriptor Control */ #define AVE_TXDC 0x304 /* TX Descriptor Configuration */ #define AVE_RXDC0 0x308 /* RX Descriptor Ring0 Configuration */ #define AVE_IIRQC 0x34c /* Interval IRQ Control */ /* Packet Filter Register Group */ #define AVE_PKTF_BASE 0x800 /* PF Base Address */ #define AVE_PFMBYTE_BASE 0xd00 /* PF Mask Byte Base Address */ #define AVE_PFMBIT_BASE 0xe00 /* PF Mask Bit Base Address */ #define AVE_PFSEL_BASE 0xf00 /* PF Selector Base Address */ #define AVE_PFEN 0xffc /* Packet Filter Enable */ #define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40) #define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8) #define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4) #define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4) /* 64bit descriptor memory */ #define AVE_DESC_SIZE_64 12 /* Descriptor Size */ #define AVE_TXDM_64 0x1000 /* Tx Descriptor Memory */ #define AVE_RXDM_64 0x1c00 /* Rx Descriptor Memory */ #define AVE_TXDM_SIZE_64 0x0ba0 /* Tx Descriptor Memory Size 3KB */ #define AVE_RXDM_SIZE_64 0x6000 /* Rx Descriptor Memory Size 24KB */ /* 32bit descriptor memory */ #define AVE_DESC_SIZE_32 8 /* Descriptor Size */ #define AVE_TXDM_32 0x1000 /* Tx Descriptor Memory */ #define AVE_RXDM_32 0x1800 /* Rx Descriptor Memory */ #define AVE_TXDM_SIZE_32 0x07c0 /* Tx Descriptor Memory Size 2KB */ #define AVE_RXDM_SIZE_32 0x4000 /* Rx Descriptor Memory Size 16KB */ /* RMII Bridge Register Group */ #define AVE_RSTCTRL 0x8028 /* Reset control */ #define AVE_RSTCTRL_RMIIRST BIT(16) #define AVE_LINKSEL 0x8034 /* Link speed setting */ #define AVE_LINKSEL_100M BIT(0) /* AVE_GRR */ #define AVE_GRR_RXFFR BIT(5) /* Reset RxFIFO */ #define AVE_GRR_PHYRST BIT(4) /* Reset external PHY */ #define AVE_GRR_GRST BIT(0) /* Reset all MAC */ /* AVE_CFGR */ #define AVE_CFGR_FLE BIT(31) /* Filter Function */ #define AVE_CFGR_CHE BIT(30) /* Checksum Function */ #define AVE_CFGR_MII BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */ #define AVE_CFGR_IPFCEN BIT(24) /* IP fragment sum Enable */ /* AVE_GISR (common with GIMR) */ #define AVE_GI_PHY BIT(24) /* PHY interrupt */ 
#define AVE_GI_TX BIT(16) /* Tx complete */ #define AVE_GI_RXERR BIT(8) /* Receive frame more than max size */ #define AVE_GI_RXOVF BIT(7) /* Overflow at the RxFIFO */ #define AVE_GI_RXDROP BIT(6) /* Drop packet */ #define AVE_GI_RXIINT BIT(5) /* Interval interrupt */ /* AVE_TXCR */ #define AVE_TXCR_FLOCTR BIT(18) /* Flow control */ #define AVE_TXCR_TXSPD_1G BIT(17) #define AVE_TXCR_TXSPD_100 BIT(16) /* AVE_RXCR */ #define AVE_RXCR_RXEN BIT(30) /* Rx enable */ #define AVE_RXCR_FDUPEN BIT(22) /* Interface mode */ #define AVE_RXCR_FLOCTR BIT(21) /* Flow control */ #define AVE_RXCR_AFEN BIT(19) /* MAC address filter */ #define AVE_RXCR_DRPEN BIT(18) /* Drop pause frame */ #define AVE_RXCR_MPSIZ_MASK GENMASK(10, 0) /* AVE_MDIOCTR */ #define AVE_MDIOCTR_RREQ BIT(3) /* Read request */ #define AVE_MDIOCTR_WREQ BIT(2) /* Write request */ /* AVE_MDIOSR */ #define AVE_MDIOSR_STS BIT(0) /* access status */ /* AVE_DESCC */ #define AVE_DESCC_STATUS_MASK GENMASK(31, 16) #define AVE_DESCC_RD0 BIT(8) /* Enable Rx descriptor Ring0 */ #define AVE_DESCC_RDSTP BIT(4) /* Pause Rx descriptor */ #define AVE_DESCC_TD BIT(0) /* Enable Tx descriptor */ /* AVE_TXDC */ #define AVE_TXDC_SIZE GENMASK(27, 16) /* Size of Tx descriptor */ #define AVE_TXDC_ADDR GENMASK(11, 0) /* Start address */ #define AVE_TXDC_ADDR_START 0 /* AVE_RXDC0 */ #define AVE_RXDC0_SIZE GENMASK(30, 16) /* Size of Rx descriptor */ #define AVE_RXDC0_ADDR GENMASK(14, 0) /* Start address */ #define AVE_RXDC0_ADDR_START 0 /* AVE_IIRQC */ #define AVE_IIRQC_EN0 BIT(27) /* Enable interval interrupt Ring0 */ #define AVE_IIRQC_BSCK GENMASK(15, 0) /* Interval count unit */ /* Command status for descriptor */ #define AVE_STS_OWN BIT(31) /* Descriptor ownership */ #define AVE_STS_INTR BIT(29) /* Request for interrupt */ #define AVE_STS_OK BIT(27) /* Normal transmit */ /* TX */ #define AVE_STS_NOCSUM BIT(28) /* No use HW checksum */ #define AVE_STS_1ST BIT(26) /* Head of buffer chain */ #define AVE_STS_LAST BIT(25) /* Tail of buffer chain */ #define AVE_STS_OWC BIT(21) /* Out of window,Late Collision */ #define AVE_STS_EC BIT(20) /* Excess collision occurred */ #define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0) /* RX */ #define AVE_STS_CSSV BIT(21) /* Checksum check performed */ #define AVE_STS_CSER BIT(20) /* Checksum error detected */ #define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0) /* Packet filter */ #define AVE_PFMBYTE_MASK0 (GENMASK(31, 8) | GENMASK(5, 0)) #define AVE_PFMBYTE_MASK1 GENMASK(25, 0) #define AVE_PFMBIT_MASK GENMASK(15, 0) #define AVE_PF_SIZE 17 /* Number of all packet filter */ #define AVE_PF_MULTICAST_SIZE 7 /* Number of multicast filter */ #define AVE_PFNUM_FILTER 0 /* No.0 */ #define AVE_PFNUM_UNICAST 1 /* No.1 */ #define AVE_PFNUM_BROADCAST 2 /* No.2 */ #define AVE_PFNUM_MULTICAST 11 /* No.11-17 */ /* NETIF Message control */ #define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK | \ NETIF_MSG_TIMER | \ NETIF_MSG_IFDOWN | \ NETIF_MSG_IFUP | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) /* Parameter for descriptor */ #define AVE_NR_TXDESC 64 /* Tx descriptor */ #define AVE_NR_RXDESC 256 /* Rx descriptor */ #define AVE_DESC_OFS_CMDSTS 0 #define AVE_DESC_OFS_ADDRL 4 #define AVE_DESC_OFS_ADDRU 8 /* Parameter for ethernet frame */ #define AVE_MAX_ETHFRAME 1518 #define AVE_FRAME_HEADROOM 2 /* Parameter for interrupt */ #define AVE_INTM_COUNT 20 #define AVE_FORCE_TXINTCNT 1 /* SG */ #define SG_ETPINMODE 0x540 #define SG_ETPINMODE_EXTPHY BIT(1) /* for LD11 */ #define SG_ETPINMODE_RMII(ins) BIT(ins) #define IS_DESC_64BIT(p) 
((p)->data->is_desc_64bit) #define AVE_MAX_CLKS 4 #define AVE_MAX_RSTS 2 enum desc_id { AVE_DESCID_RX, AVE_DESCID_TX, }; enum desc_state { AVE_DESC_RX_PERMIT, AVE_DESC_RX_SUSPEND, AVE_DESC_START, AVE_DESC_STOP, }; struct ave_desc { struct sk_buff *skbs; dma_addr_t skbs_dma; size_t skbs_dmalen; }; struct ave_desc_info { u32 ndesc; /* number of descriptor */ u32 daddr; /* start address of descriptor */ u32 proc_idx; /* index of processing packet */ u32 done_idx; /* index of processed packet */ struct ave_desc *desc; /* skb info related descriptor */ }; struct ave_stats { struct u64_stats_sync syncp; u64 packets; u64 bytes; u64 errors; u64 dropped; u64 collisions; u64 fifo_errors; }; struct ave_private { void __iomem *base; int irq; int phy_id; unsigned int desc_size; u32 msg_enable; int nclks; struct clk *clk[AVE_MAX_CLKS]; int nrsts; struct reset_control *rst[AVE_MAX_RSTS]; phy_interface_t phy_mode; struct phy_device *phydev; struct mii_bus *mdio; struct regmap *regmap; unsigned int pinmode_mask; unsigned int pinmode_val; u32 wolopts; /* stats */ struct ave_stats stats_rx; struct ave_stats stats_tx; /* NAPI support */ struct net_device *ndev; struct napi_struct napi_rx; struct napi_struct napi_tx; /* descriptor */ struct ave_desc_info rx; struct ave_desc_info tx; /* flow control */ int pause_auto; int pause_rx; int pause_tx; const struct ave_soc_data *data; }; struct ave_soc_data { bool is_desc_64bit; const char *clock_names[AVE_MAX_CLKS]; const char *reset_names[AVE_MAX_RSTS]; int (*get_pinmode)(struct ave_private *priv, phy_interface_t phy_mode, u32 arg); }; static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry, int offset) { struct ave_private *priv = netdev_priv(ndev); u32 addr; addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr) + entry * priv->desc_size + offset; return readl(priv->base + addr); } static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id, int entry) { return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS); } static void ave_desc_write(struct net_device *ndev, enum desc_id id, int entry, int offset, u32 val) { struct ave_private *priv = netdev_priv(ndev); u32 addr; addr = ((id == AVE_DESCID_TX) ? 
priv->tx.daddr : priv->rx.daddr) + entry * priv->desc_size + offset; writel(val, priv->base + addr); } static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id, int entry, u32 val) { ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val); } static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id, int entry, dma_addr_t paddr) { struct ave_private *priv = netdev_priv(ndev); ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL, lower_32_bits(paddr)); if (IS_DESC_64BIT(priv)) ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRU, upper_32_bits(paddr)); } static u32 ave_irq_disable_all(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); u32 ret; ret = readl(priv->base + AVE_GIMR); writel(0, priv->base + AVE_GIMR); return ret; } static void ave_irq_restore(struct net_device *ndev, u32 val) { struct ave_private *priv = netdev_priv(ndev); writel(val, priv->base + AVE_GIMR); } static void ave_irq_enable(struct net_device *ndev, u32 bitflag) { struct ave_private *priv = netdev_priv(ndev); writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR); writel(bitflag, priv->base + AVE_GISR); } static void ave_hw_write_macaddr(struct net_device *ndev, const unsigned char *mac_addr, int reg1, int reg2) { struct ave_private *priv = netdev_priv(ndev); writel(mac_addr[0] | mac_addr[1] << 8 | mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1); writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2); } static void ave_hw_read_version(struct net_device *ndev, char *buf, int len) { struct ave_private *priv = netdev_priv(ndev); u32 major, minor, vr; vr = readl(priv->base + AVE_VR); major = (vr & GENMASK(15, 8)) >> 8; minor = (vr & GENMASK(7, 0)); snprintf(buf, len, "v%u.%u", major, minor); } static void ave_ethtool_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct device *dev = ndev->dev.parent; strscpy(info->driver, dev->driver->name, sizeof(info->driver)); strscpy(info->bus_info, dev_name(dev), sizeof(info->bus_info)); ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version)); } static u32 ave_ethtool_get_msglevel(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); return priv->msg_enable; } static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val) { struct ave_private *priv = netdev_priv(ndev); priv->msg_enable = val; } static void ave_ethtool_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; if (ndev->phydev) phy_ethtool_get_wol(ndev->phydev, wol); } static int __ave_ethtool_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { if (!ndev->phydev || (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))) return -EOPNOTSUPP; return phy_ethtool_set_wol(ndev->phydev, wol); } static int ave_ethtool_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { int ret; ret = __ave_ethtool_set_wol(ndev, wol); if (!ret) device_set_wakeup_enable(&ndev->dev, !!wol->wolopts); return ret; } static void ave_ethtool_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct ave_private *priv = netdev_priv(ndev); pause->autoneg = priv->pause_auto; pause->rx_pause = priv->pause_rx; pause->tx_pause = priv->pause_tx; } static int ave_ethtool_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct ave_private *priv = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; if (!phydev) return -EINVAL; priv->pause_auto = pause->autoneg; priv->pause_rx = pause->rx_pause; 
priv->pause_tx = pause->tx_pause; phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); return 0; } static const struct ethtool_ops ave_ethtool_ops = { .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_drvinfo = ave_ethtool_get_drvinfo, .nway_reset = phy_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_msglevel = ave_ethtool_get_msglevel, .set_msglevel = ave_ethtool_set_msglevel, .get_wol = ave_ethtool_get_wol, .set_wol = ave_ethtool_set_wol, .get_pauseparam = ave_ethtool_get_pauseparam, .set_pauseparam = ave_ethtool_set_pauseparam, }; static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum) { struct net_device *ndev = bus->priv; struct ave_private *priv; u32 mdioctl, mdiosr; int ret; priv = netdev_priv(ndev); /* write address */ writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); /* read request */ mdioctl = readl(priv->base + AVE_MDIOCTR); writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ, priv->base + AVE_MDIOCTR); ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, !(mdiosr & AVE_MDIOSR_STS), 20, 2000); if (ret) { netdev_err(ndev, "failed to read (phy:%d reg:%x)\n", phyid, regnum); return ret; } return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0); } static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum, u16 val) { struct net_device *ndev = bus->priv; struct ave_private *priv; u32 mdioctl, mdiosr; int ret; priv = netdev_priv(ndev); /* write address */ writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); /* write data */ writel(val, priv->base + AVE_MDIOWDR); /* write request */ mdioctl = readl(priv->base + AVE_MDIOCTR); writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ, priv->base + AVE_MDIOCTR); ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, !(mdiosr & AVE_MDIOSR_STS), 20, 2000); if (ret) netdev_err(ndev, "failed to write (phy:%d reg:%x)\n", phyid, regnum); return ret; } static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc, void *ptr, size_t len, enum dma_data_direction dir, dma_addr_t *paddr) { dma_addr_t map_addr; map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir); if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr))) return -ENOMEM; desc->skbs_dma = map_addr; desc->skbs_dmalen = len; *paddr = map_addr; return 0; } static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc, enum dma_data_direction dir) { if (!desc->skbs_dma) return; dma_unmap_single(ndev->dev.parent, desc->skbs_dma, desc->skbs_dmalen, dir); desc->skbs_dma = 0; } /* Prepare Rx descriptor and memory */ static int ave_rxdesc_prepare(struct net_device *ndev, int entry) { struct ave_private *priv = netdev_priv(ndev); struct sk_buff *skb; dma_addr_t paddr; int ret; skb = priv->rx.desc[entry].skbs; if (!skb) { skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME); if (!skb) { netdev_err(ndev, "can't allocate skb for Rx\n"); return -ENOMEM; } skb->data += AVE_FRAME_HEADROOM; skb->tail += AVE_FRAME_HEADROOM; } /* set disable to cmdsts */ ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, AVE_STS_INTR | AVE_STS_OWN); /* map Rx buffer * Rx buffer set to the Rx descriptor has two restrictions: * - Rx buffer address is 4 byte aligned. * - Rx buffer begins with 2 byte headroom, and data will be put from * (buffer + 2). * To satisfy this, specify the address to put back the buffer * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size * by AVE_FRAME_HEADROOM. 
*/ ret = ave_dma_map(ndev, &priv->rx.desc[entry], skb->data - AVE_FRAME_HEADROOM, AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM, DMA_FROM_DEVICE, &paddr); if (ret) { netdev_err(ndev, "can't map skb for Rx\n"); dev_kfree_skb_any(skb); return ret; } priv->rx.desc[entry].skbs = skb; /* set buffer pointer */ ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr); /* set enable to cmdsts */ ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, AVE_STS_INTR | AVE_MAX_ETHFRAME); return ret; } /* Switch state of descriptor */ static int ave_desc_switch(struct net_device *ndev, enum desc_state state) { struct ave_private *priv = netdev_priv(ndev); int ret = 0; u32 val; switch (state) { case AVE_DESC_START: writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC); break; case AVE_DESC_STOP: writel(0, priv->base + AVE_DESCC); if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val, 150, 15000)) { netdev_err(ndev, "can't stop descriptor\n"); ret = -EBUSY; } break; case AVE_DESC_RX_SUSPEND: val = readl(priv->base + AVE_DESCC); val |= AVE_DESCC_RDSTP; val &= ~AVE_DESCC_STATUS_MASK; writel(val, priv->base + AVE_DESCC); if (readl_poll_timeout(priv->base + AVE_DESCC, val, val & (AVE_DESCC_RDSTP << 16), 150, 150000)) { netdev_err(ndev, "can't suspend descriptor\n"); ret = -EBUSY; } break; case AVE_DESC_RX_PERMIT: val = readl(priv->base + AVE_DESCC); val &= ~AVE_DESCC_RDSTP; val &= ~AVE_DESCC_STATUS_MASK; writel(val, priv->base + AVE_DESCC); break; default: ret = -EINVAL; break; } return ret; } static int ave_tx_complete(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); u32 proc_idx, done_idx, ndesc, cmdsts; unsigned int nr_freebuf = 0; unsigned int tx_packets = 0; unsigned int tx_bytes = 0; proc_idx = priv->tx.proc_idx; done_idx = priv->tx.done_idx; ndesc = priv->tx.ndesc; /* free pre-stored skb from done_idx to proc_idx */ while (proc_idx != done_idx) { cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx); /* do nothing if owner is HW (==1 for Tx) */ if (cmdsts & AVE_STS_OWN) break; /* check Tx status and updates statistics */ if (cmdsts & AVE_STS_OK) { tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK; /* success */ if (cmdsts & AVE_STS_LAST) tx_packets++; } else { /* error */ if (cmdsts & AVE_STS_LAST) { priv->stats_tx.errors++; if (cmdsts & (AVE_STS_OWC | AVE_STS_EC)) priv->stats_tx.collisions++; } } /* release skb */ if (priv->tx.desc[done_idx].skbs) { ave_dma_unmap(ndev, &priv->tx.desc[done_idx], DMA_TO_DEVICE); dev_consume_skb_any(priv->tx.desc[done_idx].skbs); priv->tx.desc[done_idx].skbs = NULL; nr_freebuf++; } done_idx = (done_idx + 1) % ndesc; } priv->tx.done_idx = done_idx; /* update stats */ u64_stats_update_begin(&priv->stats_tx.syncp); priv->stats_tx.packets += tx_packets; priv->stats_tx.bytes += tx_bytes; u64_stats_update_end(&priv->stats_tx.syncp); /* wake queue for freeing buffer */ if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf) netif_wake_queue(ndev); return nr_freebuf; } static int ave_rx_receive(struct net_device *ndev, int num) { struct ave_private *priv = netdev_priv(ndev); unsigned int rx_packets = 0; unsigned int rx_bytes = 0; u32 proc_idx, done_idx; struct sk_buff *skb; unsigned int pktlen; int restpkt, npkts; u32 ndesc, cmdsts; proc_idx = priv->rx.proc_idx; done_idx = priv->rx.done_idx; ndesc = priv->rx.ndesc; restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc; for (npkts = 0; npkts < num; npkts++) { /* we can't receive more packet, so fill desc quickly */ if (--restpkt < 0) break; cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx); /* do 
nothing if owner is HW (==0 for Rx) */ if (!(cmdsts & AVE_STS_OWN)) break; if (!(cmdsts & AVE_STS_OK)) { priv->stats_rx.errors++; proc_idx = (proc_idx + 1) % ndesc; continue; } pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK; /* get skbuff for rx */ skb = priv->rx.desc[proc_idx].skbs; priv->rx.desc[proc_idx].skbs = NULL; ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE); skb->dev = ndev; skb_put(skb, pktlen); skb->protocol = eth_type_trans(skb, ndev); if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER))) skb->ip_summed = CHECKSUM_UNNECESSARY; rx_packets++; rx_bytes += pktlen; netif_receive_skb(skb); proc_idx = (proc_idx + 1) % ndesc; } priv->rx.proc_idx = proc_idx; /* update stats */ u64_stats_update_begin(&priv->stats_rx.syncp); priv->stats_rx.packets += rx_packets; priv->stats_rx.bytes += rx_bytes; u64_stats_update_end(&priv->stats_rx.syncp); /* refill the Rx buffers */ while (proc_idx != done_idx) { if (ave_rxdesc_prepare(ndev, done_idx)) break; done_idx = (done_idx + 1) % ndesc; } priv->rx.done_idx = done_idx; return npkts; } static int ave_napi_poll_rx(struct napi_struct *napi, int budget) { struct ave_private *priv; struct net_device *ndev; int num; priv = container_of(napi, struct ave_private, napi_rx); ndev = priv->ndev; num = ave_rx_receive(ndev, budget); if (num < budget) { napi_complete_done(napi, num); /* enable Rx interrupt when NAPI finishes */ ave_irq_enable(ndev, AVE_GI_RXIINT); } return num; } static int ave_napi_poll_tx(struct napi_struct *napi, int budget) { struct ave_private *priv; struct net_device *ndev; int num; priv = container_of(napi, struct ave_private, napi_tx); ndev = priv->ndev; num = ave_tx_complete(ndev); napi_complete(napi); /* enable Tx interrupt when NAPI finishes */ ave_irq_enable(ndev, AVE_GI_TX); return num; } static void ave_global_reset(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); u32 val; /* set config register */ val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE; if (!phy_interface_mode_is_rgmii(priv->phy_mode)) val |= AVE_CFGR_MII; writel(val, priv->base + AVE_CFGR); /* reset RMII register */ val = readl(priv->base + AVE_RSTCTRL); val &= ~AVE_RSTCTRL_RMIIRST; writel(val, priv->base + AVE_RSTCTRL); /* assert reset */ writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR); msleep(20); /* 1st, negate PHY reset only */ writel(AVE_GRR_GRST, priv->base + AVE_GRR); msleep(40); /* negate reset */ writel(0, priv->base + AVE_GRR); msleep(40); /* negate RMII register */ val = readl(priv->base + AVE_RSTCTRL); val |= AVE_RSTCTRL_RMIIRST; writel(val, priv->base + AVE_RSTCTRL); ave_irq_disable_all(ndev); } static void ave_rxfifo_reset(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); u32 rxcr_org; /* save and disable MAC receive op */ rxcr_org = readl(priv->base + AVE_RXCR); writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR); /* suspend Rx descriptor */ ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND); /* receive all packets before descriptor starts */ ave_rx_receive(ndev, priv->rx.ndesc); /* assert reset */ writel(AVE_GRR_RXFFR, priv->base + AVE_GRR); udelay(50); /* negate reset */ writel(0, priv->base + AVE_GRR); udelay(20); /* negate interrupt status */ writel(AVE_GI_RXOVF, priv->base + AVE_GISR); /* permit descriptor */ ave_desc_switch(ndev, AVE_DESC_RX_PERMIT); /* restore MAC reccieve op */ writel(rxcr_org, priv->base + AVE_RXCR); } static irqreturn_t ave_irq_handler(int irq, void *netdev) { struct net_device *ndev = (struct net_device *)netdev; struct ave_private *priv = 
netdev_priv(ndev); u32 gimr_val, gisr_val; gimr_val = ave_irq_disable_all(ndev); /* get interrupt status */ gisr_val = readl(priv->base + AVE_GISR); /* PHY */ if (gisr_val & AVE_GI_PHY) writel(AVE_GI_PHY, priv->base + AVE_GISR); /* check exceeding packet */ if (gisr_val & AVE_GI_RXERR) { writel(AVE_GI_RXERR, priv->base + AVE_GISR); netdev_err(ndev, "receive a packet exceeding frame buffer\n"); } gisr_val &= gimr_val; if (!gisr_val) goto exit_isr; /* RxFIFO overflow */ if (gisr_val & AVE_GI_RXOVF) { priv->stats_rx.fifo_errors++; ave_rxfifo_reset(ndev); goto exit_isr; } /* Rx drop */ if (gisr_val & AVE_GI_RXDROP) { priv->stats_rx.dropped++; writel(AVE_GI_RXDROP, priv->base + AVE_GISR); } /* Rx interval */ if (gisr_val & AVE_GI_RXIINT) { napi_schedule(&priv->napi_rx); /* still force to disable Rx interrupt until NAPI finishes */ gimr_val &= ~AVE_GI_RXIINT; } /* Tx completed */ if (gisr_val & AVE_GI_TX) { napi_schedule(&priv->napi_tx); /* still force to disable Tx interrupt until NAPI finishes */ gimr_val &= ~AVE_GI_TX; } exit_isr: ave_irq_restore(ndev, gimr_val); return IRQ_HANDLED; } static int ave_pfsel_start(struct net_device *ndev, unsigned int entry) { struct ave_private *priv = netdev_priv(ndev); u32 val; if (WARN_ON(entry > AVE_PF_SIZE)) return -EINVAL; val = readl(priv->base + AVE_PFEN); writel(val | BIT(entry), priv->base + AVE_PFEN); return 0; } static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry) { struct ave_private *priv = netdev_priv(ndev); u32 val; if (WARN_ON(entry > AVE_PF_SIZE)) return -EINVAL; val = readl(priv->base + AVE_PFEN); writel(val & ~BIT(entry), priv->base + AVE_PFEN); return 0; } static int ave_pfsel_set_macaddr(struct net_device *ndev, unsigned int entry, const unsigned char *mac_addr, unsigned int set_size) { struct ave_private *priv = netdev_priv(ndev); if (WARN_ON(entry > AVE_PF_SIZE)) return -EINVAL; if (WARN_ON(set_size > 6)) return -EINVAL; ave_pfsel_stop(ndev, entry); /* set MAC address for the filter */ ave_hw_write_macaddr(ndev, mac_addr, AVE_PKTF(entry), AVE_PKTF(entry) + 4); /* set byte mask */ writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry)); writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); /* set bit mask filter */ writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry)); /* set selector to ring 0 */ writel(0, priv->base + AVE_PFSEL(entry)); /* restart filter */ ave_pfsel_start(ndev, entry); return 0; } static void ave_pfsel_set_promisc(struct net_device *ndev, unsigned int entry, u32 rxring) { struct ave_private *priv = netdev_priv(ndev); if (WARN_ON(entry > AVE_PF_SIZE)) return; ave_pfsel_stop(ndev, entry); /* set byte mask */ writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry)); writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); /* set bit mask filter */ writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry)); /* set selector to rxring */ writel(rxring, priv->base + AVE_PFSEL(entry)); ave_pfsel_start(ndev, entry); } static void ave_pfsel_init(struct net_device *ndev) { unsigned char bcast_mac[ETH_ALEN]; int i; eth_broadcast_addr(bcast_mac); for (i = 0; i < AVE_PF_SIZE; i++) ave_pfsel_stop(ndev, i); /* promiscious entry, select ring 0 */ ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0); /* unicast entry */ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); /* broadcast entry */ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6); } static void ave_phy_adjust_link(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); 
struct phy_device *phydev = ndev->phydev; u32 val, txcr, rxcr, rxcr_org; u16 rmt_adv = 0, lcl_adv = 0; u8 cap; /* set RGMII speed */ val = readl(priv->base + AVE_TXCR); val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G); if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000) val |= AVE_TXCR_TXSPD_1G; else if (phydev->speed == SPEED_100) val |= AVE_TXCR_TXSPD_100; writel(val, priv->base + AVE_TXCR); /* set RMII speed (100M/10M only) */ if (!phy_interface_is_rgmii(phydev)) { val = readl(priv->base + AVE_LINKSEL); if (phydev->speed == SPEED_10) val &= ~AVE_LINKSEL_100M; else val |= AVE_LINKSEL_100M; writel(val, priv->base + AVE_LINKSEL); } /* check current RXCR/TXCR */ rxcr = readl(priv->base + AVE_RXCR); txcr = readl(priv->base + AVE_TXCR); rxcr_org = rxcr; if (phydev->duplex) { rxcr |= AVE_RXCR_FDUPEN; if (phydev->pause) rmt_adv |= LPA_PAUSE_CAP; if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (cap & FLOW_CTRL_TX) txcr |= AVE_TXCR_FLOCTR; else txcr &= ~AVE_TXCR_FLOCTR; if (cap & FLOW_CTRL_RX) rxcr |= AVE_RXCR_FLOCTR; else rxcr &= ~AVE_RXCR_FLOCTR; } else { rxcr &= ~AVE_RXCR_FDUPEN; rxcr &= ~AVE_RXCR_FLOCTR; txcr &= ~AVE_TXCR_FLOCTR; } if (rxcr_org != rxcr) { /* disable Rx mac */ writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR); /* change and enable TX/Rx mac */ writel(txcr, priv->base + AVE_TXCR); writel(rxcr, priv->base + AVE_RXCR); } phy_print_status(phydev); } static void ave_macaddr_init(struct net_device *ndev) { ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R); /* pfsel unicast entry */ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); } static int ave_init(struct net_device *ndev) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; struct ave_private *priv = netdev_priv(ndev); struct device *dev = ndev->dev.parent; struct device_node *np = dev->of_node; struct device_node *mdio_np; struct phy_device *phydev; int nc, nr, ret; /* enable clk because of hw access until ndo_open */ for (nc = 0; nc < priv->nclks; nc++) { ret = clk_prepare_enable(priv->clk[nc]); if (ret) { dev_err(dev, "can't enable clock\n"); goto out_clk_disable; } } for (nr = 0; nr < priv->nrsts; nr++) { ret = reset_control_deassert(priv->rst[nr]); if (ret) { dev_err(dev, "can't deassert reset\n"); goto out_reset_assert; } } ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, priv->pinmode_mask, priv->pinmode_val); if (ret) goto out_reset_assert; ave_global_reset(ndev); mdio_np = of_get_child_by_name(np, "mdio"); if (!mdio_np) { dev_err(dev, "mdio node not found\n"); ret = -EINVAL; goto out_reset_assert; } ret = of_mdiobus_register(priv->mdio, mdio_np); of_node_put(mdio_np); if (ret) { dev_err(dev, "failed to register mdiobus\n"); goto out_reset_assert; } phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link); if (!phydev) { dev_err(dev, "could not attach to PHY\n"); ret = -ENODEV; goto out_mdio_unregister; } priv->phydev = phydev; ave_ethtool_get_wol(ndev, &wol); device_set_wakeup_capable(&ndev->dev, !!wol.supported); /* set wol initial state disabled */ wol.wolopts = 0; __ave_ethtool_set_wol(ndev, &wol); if (!phy_interface_is_rgmii(phydev)) phy_set_max_speed(phydev, SPEED_100); phy_support_asym_pause(phydev); phydev->mac_managed_pm = true; phy_attached_info(phydev); return 0; out_mdio_unregister: mdiobus_unregister(priv->mdio); out_reset_assert: while (--nr >= 0) reset_control_assert(priv->rst[nr]); out_clk_disable: while (--nc >= 0) 
clk_disable_unprepare(priv->clk[nc]); return ret; } static void ave_uninit(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); int i; phy_disconnect(priv->phydev); mdiobus_unregister(priv->mdio); /* disable clk because of hw access after ndo_stop */ for (i = 0; i < priv->nrsts; i++) reset_control_assert(priv->rst[i]); for (i = 0; i < priv->nclks; i++) clk_disable_unprepare(priv->clk[i]); } static int ave_open(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); int entry; int ret; u32 val; ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name, ndev); if (ret) return ret; priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc), GFP_KERNEL); if (!priv->tx.desc) { ret = -ENOMEM; goto out_free_irq; } priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc), GFP_KERNEL); if (!priv->rx.desc) { kfree(priv->tx.desc); ret = -ENOMEM; goto out_free_irq; } /* initialize Tx work and descriptor */ priv->tx.proc_idx = 0; priv->tx.done_idx = 0; for (entry = 0; entry < priv->tx.ndesc; entry++) { ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0); ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0); } writel(AVE_TXDC_ADDR_START | (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE), priv->base + AVE_TXDC); /* initialize Rx work and descriptor */ priv->rx.proc_idx = 0; priv->rx.done_idx = 0; for (entry = 0; entry < priv->rx.ndesc; entry++) { if (ave_rxdesc_prepare(ndev, entry)) break; } writel(AVE_RXDC0_ADDR_START | (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE), priv->base + AVE_RXDC0); ave_desc_switch(ndev, AVE_DESC_START); ave_pfsel_init(ndev); ave_macaddr_init(ndev); /* set Rx configuration */ /* full duplex, enable pause drop, enalbe flow control */ val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN | AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK); writel(val, priv->base + AVE_RXCR); /* set Tx configuration */ /* enable flow control, disable loopback */ writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR); /* enable timer, clear EN,INTM, and mask interval unit(BSCK) */ val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK; val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16); writel(val, priv->base + AVE_IIRQC); val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP; ave_irq_restore(ndev, val); napi_enable(&priv->napi_rx); napi_enable(&priv->napi_tx); phy_start(ndev->phydev); phy_start_aneg(ndev->phydev); netif_start_queue(ndev); return 0; out_free_irq: disable_irq(priv->irq); free_irq(priv->irq, ndev); return ret; } static int ave_stop(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); int entry; ave_irq_disable_all(ndev); disable_irq(priv->irq); free_irq(priv->irq, ndev); netif_tx_disable(ndev); phy_stop(ndev->phydev); napi_disable(&priv->napi_tx); napi_disable(&priv->napi_rx); ave_desc_switch(ndev, AVE_DESC_STOP); /* free Tx buffer */ for (entry = 0; entry < priv->tx.ndesc; entry++) { if (!priv->tx.desc[entry].skbs) continue; ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE); dev_kfree_skb_any(priv->tx.desc[entry].skbs); priv->tx.desc[entry].skbs = NULL; } priv->tx.proc_idx = 0; priv->tx.done_idx = 0; /* free Rx buffer */ for (entry = 0; entry < priv->rx.ndesc; entry++) { if (!priv->rx.desc[entry].skbs) continue; ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE); dev_kfree_skb_any(priv->rx.desc[entry].skbs); priv->rx.desc[entry].skbs = NULL; } priv->rx.proc_idx = 0; priv->rx.done_idx = 0; kfree(priv->tx.desc); kfree(priv->rx.desc); return 0; } static 
netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); u32 proc_idx, done_idx, ndesc, cmdsts; int ret, freepkt; dma_addr_t paddr; proc_idx = priv->tx.proc_idx; done_idx = priv->tx.done_idx; ndesc = priv->tx.ndesc; freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc; /* stop queue when not enough entry */ if (unlikely(freepkt < 1)) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; } /* add padding for short packet */ if (skb_put_padto(skb, ETH_ZLEN)) { priv->stats_tx.dropped++; return NETDEV_TX_OK; } /* map Tx buffer * Tx buffer set to the Tx descriptor doesn't have any restriction. */ ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx], skb->data, skb->len, DMA_TO_DEVICE, &paddr); if (ret) { dev_kfree_skb_any(skb); priv->stats_tx.dropped++; return NETDEV_TX_OK; } priv->tx.desc[proc_idx].skbs = skb; ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr); cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST | (skb->len & AVE_STS_PKTLEN_TX_MASK); /* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */ if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev)) cmdsts |= AVE_STS_INTR; /* disable checksum calculation when skb doesn't calurate checksum */ if (skb->ip_summed == CHECKSUM_NONE || skb->ip_summed == CHECKSUM_UNNECESSARY) cmdsts |= AVE_STS_NOCSUM; ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts); priv->tx.proc_idx = (proc_idx + 1) % ndesc; return NETDEV_TX_OK; } static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) { return phy_mii_ioctl(ndev->phydev, ifr, cmd); } static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 }; static void ave_set_rx_mode(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); struct netdev_hw_addr *hw_adr; int count, mc_cnt; u32 val; /* MAC addr filter enable for promiscious mode */ mc_cnt = netdev_mc_count(ndev); val = readl(priv->base + AVE_RXCR); if (ndev->flags & IFF_PROMISC || !mc_cnt) val &= ~AVE_RXCR_AFEN; else val |= AVE_RXCR_AFEN; writel(val, priv->base + AVE_RXCR); /* set all multicast address */ if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) { ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST, v4multi_macadr, 1); ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1, v6multi_macadr, 1); } else { /* stop all multicast filter */ for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++) ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count); /* set multicast addresses */ count = 0; netdev_for_each_mc_addr(hw_adr, ndev) { if (count == mc_cnt) break; ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count, hw_adr->addr, 6); count++; } } } static void ave_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { struct ave_private *priv = netdev_priv(ndev); unsigned int start; do { start = u64_stats_fetch_begin(&priv->stats_rx.syncp); stats->rx_packets = priv->stats_rx.packets; stats->rx_bytes = priv->stats_rx.bytes; } while (u64_stats_fetch_retry(&priv->stats_rx.syncp, start)); do { start = u64_stats_fetch_begin(&priv->stats_tx.syncp); stats->tx_packets = priv->stats_tx.packets; stats->tx_bytes = priv->stats_tx.bytes; } while (u64_stats_fetch_retry(&priv->stats_tx.syncp, start)); stats->rx_errors = priv->stats_rx.errors; stats->tx_errors = priv->stats_tx.errors; stats->rx_dropped = priv->stats_rx.dropped; stats->tx_dropped = priv->stats_tx.dropped; stats->rx_fifo_errors = priv->stats_rx.fifo_errors; 
stats->collisions = priv->stats_tx.collisions; } static int ave_set_mac_address(struct net_device *ndev, void *p) { int ret = eth_mac_addr(ndev, p); if (ret) return ret; ave_macaddr_init(ndev); return 0; } static const struct net_device_ops ave_netdev_ops = { .ndo_init = ave_init, .ndo_uninit = ave_uninit, .ndo_open = ave_open, .ndo_stop = ave_stop, .ndo_start_xmit = ave_start_xmit, .ndo_eth_ioctl = ave_ioctl, .ndo_set_rx_mode = ave_set_rx_mode, .ndo_get_stats64 = ave_get_stats64, .ndo_set_mac_address = ave_set_mac_address, }; static int ave_probe(struct platform_device *pdev) { const struct ave_soc_data *data; struct device *dev = &pdev->dev; char buf[ETHTOOL_FWVERS_LEN]; struct of_phandle_args args; phy_interface_t phy_mode; struct ave_private *priv; struct net_device *ndev; struct device_node *np; void __iomem *base; const char *name; int i, irq, ret; u64 dma_mask; u32 ave_id; data = of_device_get_match_data(dev); if (WARN_ON(!data)) return -EINVAL; np = dev->of_node; ret = of_get_phy_mode(np, &phy_mode); if (ret) { dev_err(dev, "phy-mode not found\n"); return ret; } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); ndev = devm_alloc_etherdev(dev, sizeof(struct ave_private)); if (!ndev) { dev_err(dev, "can't allocate ethernet device\n"); return -ENOMEM; } ndev->netdev_ops = &ave_netdev_ops; ndev->ethtool_ops = &ave_ethtool_ops; SET_NETDEV_DEV(ndev, dev); ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); ret = of_get_ethdev_address(np, ndev); if (ret) { /* if the mac address is invalid, use random mac address */ eth_hw_addr_random(ndev); dev_warn(dev, "Using random MAC address: %pM\n", ndev->dev_addr); } priv = netdev_priv(ndev); priv->base = base; priv->irq = irq; priv->ndev = ndev; priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE); priv->phy_mode = phy_mode; priv->data = data; if (IS_DESC_64BIT(priv)) { priv->desc_size = AVE_DESC_SIZE_64; priv->tx.daddr = AVE_TXDM_64; priv->rx.daddr = AVE_RXDM_64; dma_mask = DMA_BIT_MASK(64); } else { priv->desc_size = AVE_DESC_SIZE_32; priv->tx.daddr = AVE_TXDM_32; priv->rx.daddr = AVE_RXDM_32; dma_mask = DMA_BIT_MASK(32); } ret = dma_set_mask(dev, dma_mask); if (ret) return ret; priv->tx.ndesc = AVE_NR_TXDESC; priv->rx.ndesc = AVE_NR_RXDESC; u64_stats_init(&priv->stats_tx.syncp); u64_stats_init(&priv->stats_rx.syncp); for (i = 0; i < AVE_MAX_CLKS; i++) { name = priv->data->clock_names[i]; if (!name) break; priv->clk[i] = devm_clk_get(dev, name); if (IS_ERR(priv->clk[i])) return PTR_ERR(priv->clk[i]); priv->nclks++; } for (i = 0; i < AVE_MAX_RSTS; i++) { name = priv->data->reset_names[i]; if (!name) break; priv->rst[i] = devm_reset_control_get_shared(dev, name); if (IS_ERR(priv->rst[i])) return PTR_ERR(priv->rst[i]); priv->nrsts++; } ret = of_parse_phandle_with_fixed_args(np, "socionext,syscon-phy-mode", 1, 0, &args); if (ret) { dev_err(dev, "can't get syscon-phy-mode property\n"); return ret; } priv->regmap = syscon_node_to_regmap(args.np); of_node_put(args.np); if (IS_ERR(priv->regmap)) { dev_err(dev, "can't map syscon-phy-mode\n"); return PTR_ERR(priv->regmap); } ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]); if (ret) { dev_err(dev, "invalid phy-mode setting\n"); return ret; } priv->mdio = devm_mdiobus_alloc(dev); if (!priv->mdio) return -ENOMEM; priv->mdio->priv = ndev; priv->mdio->parent = dev; 
priv->mdio->read = ave_mdiobus_read; priv->mdio->write = ave_mdiobus_write; priv->mdio->name = "uniphier-mdio"; snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id); /* Register as a NAPI supported driver */ netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx); netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx); platform_set_drvdata(pdev, ndev); ret = register_netdev(ndev); if (ret) { dev_err(dev, "failed to register netdevice\n"); goto out_del_napi; } /* get ID and version */ ave_id = readl(priv->base + AVE_IDR); ave_hw_read_version(ndev, buf, sizeof(buf)); dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n", (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff, (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff, buf, priv->irq, phy_modes(phy_mode)); return 0; out_del_napi: netif_napi_del(&priv->napi_rx); netif_napi_del(&priv->napi_tx); return ret; } static int ave_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ave_private *priv = netdev_priv(ndev); unregister_netdev(ndev); netif_napi_del(&priv->napi_rx); netif_napi_del(&priv->napi_tx); return 0; } #ifdef CONFIG_PM_SLEEP static int ave_suspend(struct device *dev) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; struct net_device *ndev = dev_get_drvdata(dev); struct ave_private *priv = netdev_priv(ndev); int ret = 0; if (netif_running(ndev)) { ret = ave_stop(ndev); netif_device_detach(ndev); } ave_ethtool_get_wol(ndev, &wol); priv->wolopts = wol.wolopts; return ret; } static int ave_resume(struct device *dev) { struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; struct net_device *ndev = dev_get_drvdata(dev); struct ave_private *priv = netdev_priv(ndev); int ret = 0; ave_global_reset(ndev); ret = phy_init_hw(ndev->phydev); if (ret) return ret; ave_ethtool_get_wol(ndev, &wol); wol.wolopts = priv->wolopts; __ave_ethtool_set_wol(ndev, &wol); if (netif_running(ndev)) { ret = ave_open(ndev); netif_device_attach(ndev); } return ret; } static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume); #define AVE_PM_OPS (&ave_pm_ops) #else #define AVE_PM_OPS NULL #endif static int ave_pro4_get_pinmode(struct ave_private *priv, phy_interface_t phy_mode, u32 arg) { if (arg > 0) return -EINVAL; priv->pinmode_mask = SG_ETPINMODE_RMII(0); switch (phy_mode) { case PHY_INTERFACE_MODE_RMII: priv->pinmode_val = SG_ETPINMODE_RMII(0); break; case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: priv->pinmode_val = 0; break; default: return -EINVAL; } return 0; } static int ave_ld11_get_pinmode(struct ave_private *priv, phy_interface_t phy_mode, u32 arg) { if (arg > 0) return -EINVAL; priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0); switch (phy_mode) { case PHY_INTERFACE_MODE_INTERNAL: priv->pinmode_val = 0; break; case PHY_INTERFACE_MODE_RMII: priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0); break; default: return -EINVAL; } return 0; } static int ave_ld20_get_pinmode(struct ave_private *priv, phy_interface_t phy_mode, u32 arg) { if (arg > 0) return -EINVAL; priv->pinmode_mask = SG_ETPINMODE_RMII(0); switch (phy_mode) { case PHY_INTERFACE_MODE_RMII: priv->pinmode_val = SG_ETPINMODE_RMII(0); break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: priv->pinmode_val = 0; break; default: return -EINVAL; } return 0; } static int 
ave_pxs3_get_pinmode(struct ave_private *priv, phy_interface_t phy_mode, u32 arg) { if (arg > 1) return -EINVAL; priv->pinmode_mask = SG_ETPINMODE_RMII(arg); switch (phy_mode) { case PHY_INTERFACE_MODE_RMII: priv->pinmode_val = SG_ETPINMODE_RMII(arg); break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: priv->pinmode_val = 0; break; default: return -EINVAL; } return 0; } static const struct ave_soc_data ave_pro4_data = { .is_desc_64bit = false, .clock_names = { "gio", "ether", "ether-gb", "ether-phy", }, .reset_names = { "gio", "ether", }, .get_pinmode = ave_pro4_get_pinmode, }; static const struct ave_soc_data ave_pxs2_data = { .is_desc_64bit = false, .clock_names = { "ether", }, .reset_names = { "ether", }, .get_pinmode = ave_pro4_get_pinmode, }; static const struct ave_soc_data ave_ld11_data = { .is_desc_64bit = false, .clock_names = { "ether", }, .reset_names = { "ether", }, .get_pinmode = ave_ld11_get_pinmode, }; static const struct ave_soc_data ave_ld20_data = { .is_desc_64bit = true, .clock_names = { "ether", }, .reset_names = { "ether", }, .get_pinmode = ave_ld20_get_pinmode, }; static const struct ave_soc_data ave_pxs3_data = { .is_desc_64bit = false, .clock_names = { "ether", }, .reset_names = { "ether", }, .get_pinmode = ave_pxs3_get_pinmode, }; static const struct ave_soc_data ave_nx1_data = { .is_desc_64bit = true, .clock_names = { "ether", }, .reset_names = { "ether", }, .get_pinmode = ave_pxs3_get_pinmode, }; static const struct of_device_id of_ave_match[] = { { .compatible = "socionext,uniphier-pro4-ave4", .data = &ave_pro4_data, }, { .compatible = "socionext,uniphier-pxs2-ave4", .data = &ave_pxs2_data, }, { .compatible = "socionext,uniphier-ld11-ave4", .data = &ave_ld11_data, }, { .compatible = "socionext,uniphier-ld20-ave4", .data = &ave_ld20_data, }, { .compatible = "socionext,uniphier-pxs3-ave4", .data = &ave_pxs3_data, }, { .compatible = "socionext,uniphier-nx1-ave4", .data = &ave_nx1_data, }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, of_ave_match); static struct platform_driver ave_driver = { .probe = ave_probe, .remove = ave_remove, .driver = { .name = "ave", .pm = AVE_PM_OPS, .of_match_table = of_ave_match, }, }; module_platform_driver(ave_driver); MODULE_AUTHOR("Kunihiko Hayashi <[email protected]>"); MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/ethernet/socionext/sni_ave.c
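Both ave_start_xmit() and ave_rx_receive() in sni_ave.c above size their descriptor rings with the expression ((other_idx + ndesc - 1) - my_idx) % ndesc, which keeps one slot permanently unused so that proc_idx == done_idx can only mean an empty ring. The following user-space sketch is a minimal, hypothetical demonstration of that index arithmetic; ring_free_slots and NDESC are invented names, not part of the driver.

/* Hypothetical user-space sketch (not driver code) of the ring-index arithmetic
 * used by ave_start_xmit()/ave_rx_receive(). Compile with: gcc -Wall ring.c
 */
#include <assert.h>
#include <stdio.h>

#define NDESC 8	/* any ring size works; the driver uses 64 (Tx) / 256 (Rx) */

/* Entries the producer may still claim before catching up with the consumer.
 * One slot is always left unused so that proc == done means "empty", not "full".
 */
static unsigned int ring_free_slots(unsigned int proc, unsigned int done,
				    unsigned int ndesc)
{
	return ((done + ndesc - 1) - proc) % ndesc;
}

int main(void)
{
	unsigned int proc = 0, done = 0;

	assert(ring_free_slots(proc, done, NDESC) == NDESC - 1); /* empty ring */

	/* Produce until the ring is full: the free count drops to zero. */
	while (ring_free_slots(proc, done, NDESC) > 0)
		proc = (proc + 1) % NDESC;
	printf("full at proc=%u done=%u, free=%u\n",
	       proc, done, ring_free_slots(proc, done, NDESC));

	/* Consuming one entry frees exactly one slot again. */
	done = (done + 1) % NDESC;
	printf("after reclaim, free=%u\n", ring_free_slots(proc, done, NDESC));
	return 0;
}

Reserving the extra slot is what lets the Tx path stop the queue as soon as the free count reaches zero without ever confusing a full ring with an empty one.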
// SPDX-License-Identifier: GPL-2.0+ #include <linux/types.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/acpi.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/netlink.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <net/tcp.h> #include <net/page_pool/helpers.h> #include <net/ip6_checksum.h> #define NETSEC_REG_SOFT_RST 0x104 #define NETSEC_REG_COM_INIT 0x120 #define NETSEC_REG_TOP_STATUS 0x200 #define NETSEC_IRQ_RX BIT(1) #define NETSEC_IRQ_TX BIT(0) #define NETSEC_REG_TOP_INTEN 0x204 #define NETSEC_REG_INTEN_SET 0x234 #define NETSEC_REG_INTEN_CLR 0x238 #define NETSEC_REG_NRM_TX_STATUS 0x400 #define NETSEC_REG_NRM_TX_INTEN 0x404 #define NETSEC_REG_NRM_TX_INTEN_SET 0x428 #define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c #define NRM_TX_ST_NTOWNR BIT(17) #define NRM_TX_ST_TR_ERR BIT(16) #define NRM_TX_ST_TXDONE BIT(15) #define NRM_TX_ST_TMREXP BIT(14) #define NETSEC_REG_NRM_RX_STATUS 0x440 #define NETSEC_REG_NRM_RX_INTEN 0x444 #define NETSEC_REG_NRM_RX_INTEN_SET 0x468 #define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c #define NRM_RX_ST_RC_ERR BIT(16) #define NRM_RX_ST_PKTCNT BIT(15) #define NRM_RX_ST_TMREXP BIT(14) #define NETSEC_REG_PKT_CMD_BUF 0xd0 #define NETSEC_REG_CLK_EN 0x100 #define NETSEC_REG_PKT_CTRL 0x140 #define NETSEC_REG_DMA_TMR_CTRL 0x20c #define NETSEC_REG_F_TAIKI_MC_VER 0x22c #define NETSEC_REG_F_TAIKI_VER 0x230 #define NETSEC_REG_DMA_HM_CTRL 0x214 #define NETSEC_REG_DMA_MH_CTRL 0x220 #define NETSEC_REG_ADDR_DIS_CORE 0x218 #define NETSEC_REG_DMAC_HM_CMD_BUF 0x210 #define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c #define NETSEC_REG_NRM_TX_PKTCNT 0x410 #define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414 #define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418 #define NETSEC_REG_NRM_TX_TMR 0x41c #define NETSEC_REG_NRM_RX_PKTCNT 0x454 #define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458 #define NETSEC_REG_NRM_TX_TXINT_TMR 0x420 #define NETSEC_REG_NRM_RX_RXINT_TMR 0x460 #define NETSEC_REG_NRM_RX_TMR 0x45c #define NETSEC_REG_NRM_TX_DESC_START_UP 0x434 #define NETSEC_REG_NRM_TX_DESC_START_LW 0x408 #define NETSEC_REG_NRM_RX_DESC_START_UP 0x474 #define NETSEC_REG_NRM_RX_DESC_START_LW 0x448 #define NETSEC_REG_NRM_TX_CONFIG 0x430 #define NETSEC_REG_NRM_RX_CONFIG 0x470 #define MAC_REG_STATUS 0x1024 #define MAC_REG_DATA 0x11c0 #define MAC_REG_CMD 0x11c4 #define MAC_REG_FLOW_TH 0x11cc #define MAC_REG_INTF_SEL 0x11d4 #define MAC_REG_DESC_INIT 0x11fc #define MAC_REG_DESC_SOFT_RST 0x1204 #define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500 #define GMAC_REG_MCR 0x0000 #define GMAC_REG_MFFR 0x0004 #define GMAC_REG_GAR 0x0010 #define GMAC_REG_GDR 0x0014 #define GMAC_REG_FCR 0x0018 #define GMAC_REG_BMR 0x1000 #define GMAC_REG_RDLAR 0x100c #define GMAC_REG_TDLAR 0x1010 #define GMAC_REG_OMR 0x1018 #define MHZ(n) ((n) * 1000 * 1000) #define NETSEC_TX_SHIFT_OWN_FIELD 31 #define NETSEC_TX_SHIFT_LD_FIELD 30 #define NETSEC_TX_SHIFT_DRID_FIELD 24 #define NETSEC_TX_SHIFT_PT_FIELD 21 #define NETSEC_TX_SHIFT_TDRID_FIELD 16 #define NETSEC_TX_SHIFT_CC_FIELD 15 #define NETSEC_TX_SHIFT_FS_FIELD 9 #define NETSEC_TX_LAST 8 #define NETSEC_TX_SHIFT_CO 7 #define NETSEC_TX_SHIFT_SO 6 #define NETSEC_TX_SHIFT_TRS_FIELD 4 #define NETSEC_RX_PKT_OWN_FIELD 31 #define NETSEC_RX_PKT_LD_FIELD 30 #define NETSEC_RX_PKT_SDRID_FIELD 24 #define NETSEC_RX_PKT_FR_FIELD 23 #define NETSEC_RX_PKT_ER_FIELD 21 #define NETSEC_RX_PKT_ERR_FIELD 16 #define NETSEC_RX_PKT_TDRID_FIELD 12 #define NETSEC_RX_PKT_FS_FIELD 9 
#define NETSEC_RX_PKT_LS_FIELD 8 #define NETSEC_RX_PKT_CO_FIELD 6 #define NETSEC_RX_PKT_ERR_MASK 3 #define NETSEC_MAX_TX_PKT_LEN 1518 #define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018 #define NETSEC_RING_GMAC 15 #define NETSEC_RING_MAX 2 #define NETSEC_TCP_SEG_LEN_MAX 1460 #define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960 #define NETSEC_RX_CKSUM_NOTAVAIL 0 #define NETSEC_RX_CKSUM_OK 1 #define NETSEC_RX_CKSUM_NG 2 #define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20) #define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4) #define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20) #define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19) #define NETSEC_INT_PKTCNT_MAX 2047 #define NETSEC_FLOW_START_TH_MAX 95 #define NETSEC_FLOW_STOP_TH_MAX 95 #define NETSEC_FLOW_PAUSE_TIME_MIN 5 #define NETSEC_CLK_EN_REG_DOM_ALL 0x3f #define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28) #define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27) #define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3) #define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2) #define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1) #define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0) #define NETSEC_CLK_EN_REG_DOM_G BIT(5) #define NETSEC_CLK_EN_REG_DOM_C BIT(1) #define NETSEC_CLK_EN_REG_DOM_D BIT(0) #define NETSEC_COM_INIT_REG_DB BIT(2) #define NETSEC_COM_INIT_REG_CLS BIT(1) #define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \ NETSEC_COM_INIT_REG_DB) #define NETSEC_SOFT_RST_REG_RESET 0 #define NETSEC_SOFT_RST_REG_RUN BIT(31) #define NETSEC_DMA_CTRL_REG_STOP 1 #define MH_CTRL__MODE_TRANS BIT(20) #define NETSEC_GMAC_CMD_ST_READ 0 #define NETSEC_GMAC_CMD_ST_WRITE BIT(28) #define NETSEC_GMAC_CMD_ST_BUSY BIT(31) #define NETSEC_GMAC_BMR_REG_COMMON 0x00412080 #define NETSEC_GMAC_BMR_REG_RESET 0x00020181 #define NETSEC_GMAC_BMR_REG_SWR 0x00000001 #define NETSEC_GMAC_OMR_REG_ST BIT(13) #define NETSEC_GMAC_OMR_REG_SR BIT(1) #define NETSEC_GMAC_MCR_REG_IBN BIT(30) #define NETSEC_GMAC_MCR_REG_CST BIT(25) #define NETSEC_GMAC_MCR_REG_JE BIT(20) #define NETSEC_MCR_PS BIT(15) #define NETSEC_GMAC_MCR_REG_FES BIT(14) #define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c #define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c #define NETSEC_FCR_RFE BIT(2) #define NETSEC_FCR_TFE BIT(1) #define NETSEC_GMAC_GAR_REG_GW BIT(1) #define NETSEC_GMAC_GAR_REG_GB BIT(0) #define NETSEC_GMAC_GAR_REG_SHIFT_PA 11 #define NETSEC_GMAC_GAR_REG_SHIFT_GR 6 #define GMAC_REG_SHIFT_CR_GAR 2 #define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2 #define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3 #define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0 #define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1 #define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4 #define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5 #define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000 #define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000 #define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000 #define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31) #define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30) #define NETSEC_REG_DESC_TMR_MODE 4 #define NETSEC_REG_DESC_ENDIAN 0 #define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1 #define NETSEC_MAC_DESC_INIT_REG_INIT 1 #define NETSEC_EEPROM_MAC_ADDRESS 0x00 #define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08 #define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C #define NETSEC_EEPROM_HM_ME_SIZE 0x10 #define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14 #define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18 #define NETSEC_EEPROM_MH_ME_SIZE 0x1C #define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20 #define NETSEC_EEPROM_PKT_ME_SIZE 0x24 #define DESC_NUM 256 #define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) #define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \ NET_IP_ALIGN) #define 
NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define NETSEC_RX_BUF_SIZE (PAGE_SIZE - NETSEC_RX_BUF_NON_DATA) #define DESC_SZ sizeof(struct netsec_de) #define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000) #define NETSEC_XDP_PASS 0 #define NETSEC_XDP_CONSUMED BIT(0) #define NETSEC_XDP_TX BIT(1) #define NETSEC_XDP_REDIR BIT(2) enum ring_id { NETSEC_RING_TX = 0, NETSEC_RING_RX }; enum buf_type { TYPE_NETSEC_SKB = 0, TYPE_NETSEC_XDP_TX, TYPE_NETSEC_XDP_NDO, }; struct netsec_desc { union { struct sk_buff *skb; struct xdp_frame *xdpf; }; dma_addr_t dma_addr; void *addr; u16 len; u8 buf_type; }; struct netsec_desc_ring { dma_addr_t desc_dma; struct netsec_desc *desc; void *vaddr; u16 head, tail; u16 xdp_xmit; /* netsec_xdp_xmit packets */ struct page_pool *page_pool; struct xdp_rxq_info xdp_rxq; spinlock_t lock; /* XDP tx queue locking */ }; struct netsec_priv { struct netsec_desc_ring desc_ring[NETSEC_RING_MAX]; struct ethtool_coalesce et_coalesce; struct bpf_prog *xdp_prog; spinlock_t reglock; /* protect reg access */ struct napi_struct napi; phy_interface_t phy_interface; struct net_device *ndev; struct device_node *phy_np; struct phy_device *phydev; struct mii_bus *mii_bus; void __iomem *ioaddr; void __iomem *eeprom_base; struct device *dev; struct clk *clk; u32 msg_enable; u32 freq; u32 phy_addr; bool rx_cksum_offload_flag; }; struct netsec_de { /* Netsec Descriptor layout */ u32 attr; u32 data_buf_addr_up; u32 data_buf_addr_lw; u32 buf_len_info; }; struct netsec_tx_pkt_ctrl { u16 tcp_seg_len; bool tcp_seg_offload_flag; bool cksum_offload_flag; }; struct netsec_rx_pkt_info { int rx_cksum_result; int err_code; bool err_flag; }; static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val) { writel(val, priv->ioaddr + reg_addr); } static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr) { return readl(priv->ioaddr + reg_addr); } /************* MDIO BUS OPS FOLLOW *************/ #define TIMEOUT_SPINS_MAC 1000 #define TIMEOUT_SECONDARY_MS_MAC 100 static u32 netsec_clk_type(u32 freq) { if (freq < MHZ(35)) return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ; if (freq < MHZ(60)) return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ; if (freq < MHZ(100)) return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ; if (freq < MHZ(150)) return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ; if (freq < MHZ(250)) return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ; return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ; } static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask) { u32 timeout = TIMEOUT_SPINS_MAC; while (--timeout && netsec_read(priv, addr) & mask) cpu_relax(); if (timeout) return 0; timeout = TIMEOUT_SECONDARY_MS_MAC; while (--timeout && netsec_read(priv, addr) & mask) usleep_range(1000, 2000); if (timeout) return 0; netdev_WARN(priv->ndev, "%s: timeout\n", __func__); return -ETIMEDOUT; } static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value) { netsec_write(priv, MAC_REG_DATA, value); netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE); return netsec_wait_while_busy(priv, MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); } static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read) { int ret; netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ); ret = netsec_wait_while_busy(priv, MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); if (ret) return ret; *read = netsec_read(priv, MAC_REG_DATA); return 0; } static int netsec_mac_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask) { u32 timeout = TIMEOUT_SPINS_MAC; int ret, data; 
do { ret = netsec_mac_read(priv, addr, &data); if (ret) break; cpu_relax(); } while (--timeout && (data & mask)); if (timeout) return 0; timeout = TIMEOUT_SECONDARY_MS_MAC; do { usleep_range(1000, 2000); ret = netsec_mac_read(priv, addr, &data); if (ret) break; cpu_relax(); } while (--timeout && (data & mask)); if (timeout && !ret) return 0; netdev_WARN(priv->ndev, "%s: timeout\n", __func__); return -ETIMEDOUT; } static int netsec_mac_update_to_phy_state(struct netsec_priv *priv) { struct phy_device *phydev = priv->ndev->phydev; u32 value = 0; value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON : NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON; if (phydev->speed != SPEED_1000) value |= NETSEC_MCR_PS; if (priv->phy_interface != PHY_INTERFACE_MODE_GMII && phydev->speed == SPEED_100) value |= NETSEC_GMAC_MCR_REG_FES; value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE; if (phy_interface_mode_is_rgmii(priv->phy_interface)) value |= NETSEC_GMAC_MCR_REG_IBN; if (netsec_mac_write(priv, GMAC_REG_MCR, value)) return -ETIMEDOUT; return 0; } static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr); static int netsec_phy_write(struct mii_bus *bus, int phy_addr, int reg, u16 val) { int status; struct netsec_priv *priv = bus->priv; if (netsec_mac_write(priv, GMAC_REG_GDR, val)) return -ETIMEDOUT; if (netsec_mac_write(priv, GMAC_REG_GAR, phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | reg << NETSEC_GMAC_GAR_REG_SHIFT_GR | NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB | (netsec_clk_type(priv->freq) << GMAC_REG_SHIFT_CR_GAR))) return -ETIMEDOUT; status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB); /* The Developerbox implements an RTL8211E PHY, which has a * compatibility problem with F_GMAC4: the RTL8211E expects the MDC * clock to keep toggling for several clock cycles with MDIO held * high before entering the IDLE state. To meet this requirement, * the netsec driver needs to issue a dummy read (e.g. of the * PHYID1 register at offset 0x2) right after each write.
*/ netsec_phy_read(bus, phy_addr, MII_PHYSID1); return status; } static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr) { struct netsec_priv *priv = bus->priv; u32 data; int ret; if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB | phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR | (netsec_clk_type(priv->freq) << GMAC_REG_SHIFT_CR_GAR))) return -ETIMEDOUT; ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB); if (ret) return ret; ret = netsec_mac_read(priv, GMAC_REG_GDR, &data); if (ret) return ret; return data; } /************* ETHTOOL_OPS FOLLOW *************/ static void netsec_et_get_drvinfo(struct net_device *net_device, struct ethtool_drvinfo *info) { strscpy(info->driver, "netsec", sizeof(info->driver)); strscpy(info->bus_info, dev_name(net_device->dev.parent), sizeof(info->bus_info)); } static int netsec_et_get_coalesce(struct net_device *net_device, struct ethtool_coalesce *et_coalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct netsec_priv *priv = netdev_priv(net_device); *et_coalesce = priv->et_coalesce; return 0; } static int netsec_et_set_coalesce(struct net_device *net_device, struct ethtool_coalesce *et_coalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct netsec_priv *priv = netdev_priv(net_device); priv->et_coalesce = *et_coalesce; if (priv->et_coalesce.tx_coalesce_usecs < 50) priv->et_coalesce.tx_coalesce_usecs = 50; if (priv->et_coalesce.tx_max_coalesced_frames < 1) priv->et_coalesce.tx_max_coalesced_frames = 1; netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT, priv->et_coalesce.tx_max_coalesced_frames); netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR, priv->et_coalesce.tx_coalesce_usecs); netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE); netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP); if (priv->et_coalesce.rx_coalesce_usecs < 50) priv->et_coalesce.rx_coalesce_usecs = 50; if (priv->et_coalesce.rx_max_coalesced_frames < 1) priv->et_coalesce.rx_max_coalesced_frames = 1; netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT, priv->et_coalesce.rx_max_coalesced_frames); netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR, priv->et_coalesce.rx_coalesce_usecs); netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT); netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP); return 0; } static u32 netsec_et_get_msglevel(struct net_device *dev) { struct netsec_priv *priv = netdev_priv(dev); return priv->msg_enable; } static void netsec_et_set_msglevel(struct net_device *dev, u32 datum) { struct netsec_priv *priv = netdev_priv(dev); priv->msg_enable = datum; } static const struct ethtool_ops netsec_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = netsec_et_get_drvinfo, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_link = ethtool_op_get_link, .get_coalesce = netsec_et_get_coalesce, .set_coalesce = netsec_et_set_coalesce, .get_msglevel = netsec_et_get_msglevel, .set_msglevel = netsec_et_set_msglevel, }; /************* NETDEV_OPS FOLLOW *************/ static void netsec_set_rx_de(struct netsec_priv *priv, struct netsec_desc_ring *dring, u16 idx, const struct netsec_desc *desc) { struct netsec_de *de = dring->vaddr + DESC_SZ * idx; u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) | (1 << NETSEC_RX_PKT_FS_FIELD) | (1 << 
NETSEC_RX_PKT_LS_FIELD); if (idx == DESC_NUM - 1) attr |= (1 << NETSEC_RX_PKT_LD_FIELD); de->data_buf_addr_up = upper_32_bits(desc->dma_addr); de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); de->buf_len_info = desc->len; de->attr = attr; dma_wmb(); dring->desc[idx].dma_addr = desc->dma_addr; dring->desc[idx].addr = desc->addr; dring->desc[idx].len = desc->len; } static bool netsec_clean_tx_dring(struct netsec_priv *priv) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; struct xdp_frame_bulk bq; struct netsec_de *entry; int tail = dring->tail; unsigned int bytes; int cnt = 0; spin_lock(&dring->lock); bytes = 0; xdp_frame_bulk_init(&bq); entry = dring->vaddr + DESC_SZ * tail; rcu_read_lock(); /* need for xdp_return_frame_bulk */ while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) && cnt < DESC_NUM) { struct netsec_desc *desc; int eop; desc = &dring->desc[tail]; eop = (entry->attr >> NETSEC_TX_LAST) & 1; dma_rmb(); /* if buf_type is either TYPE_NETSEC_SKB or * TYPE_NETSEC_XDP_NDO we mapped it */ if (desc->buf_type != TYPE_NETSEC_XDP_TX) dma_unmap_single(priv->dev, desc->dma_addr, desc->len, DMA_TO_DEVICE); if (!eop) goto next; if (desc->buf_type == TYPE_NETSEC_SKB) { bytes += desc->skb->len; dev_kfree_skb(desc->skb); } else { bytes += desc->xdpf->len; if (desc->buf_type == TYPE_NETSEC_XDP_TX) xdp_return_frame_rx_napi(desc->xdpf); else xdp_return_frame_bulk(desc->xdpf, &bq); } next: /* clean up so netsec_uninit_pkt_dring() won't free the skb * again */ *desc = (struct netsec_desc){}; /* entry->attr is not going to be accessed by the NIC until * netsec_set_tx_de() is called. No need for a dma_wmb() here */ entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; /* move tail ahead */ dring->tail = (tail + 1) % DESC_NUM; tail = dring->tail; entry = dring->vaddr + DESC_SZ * tail; cnt++; } xdp_flush_frame_bulk(&bq); rcu_read_unlock(); spin_unlock(&dring->lock); if (!cnt) return false; /* reading the register clears the irq */ netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT); priv->ndev->stats.tx_packets += cnt; priv->ndev->stats.tx_bytes += bytes; netdev_completed_queue(priv->ndev, cnt, bytes); return true; } static void netsec_process_tx(struct netsec_priv *priv) { struct net_device *ndev = priv->ndev; bool cleaned; cleaned = netsec_clean_tx_dring(priv); if (cleaned && netif_queue_stopped(ndev)) { /* Make sure we update the value, anyone stopping the queue * after this will read the proper consumer idx */ smp_wmb(); netif_wake_queue(ndev); } } static void *netsec_alloc_rx_data(struct netsec_priv *priv, dma_addr_t *dma_handle, u16 *desc_len) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; struct page *page; page = page_pool_dev_alloc_pages(dring->page_pool); if (!page) return NULL; /* We allocate the same buffer length for XDP and non-XDP cases. 
* page_pool API will map the whole page, skip what's needed for * network payloads and/or XDP */ *dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM; /* Make sure the incoming payload fits in the page for XDP and non-XDP * cases and reserve enough space for headroom + skb_shared_info */ *desc_len = NETSEC_RX_BUF_SIZE; return page_address(page); } static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; u16 idx = from; while (num) { netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]); idx++; if (idx >= DESC_NUM) idx = 0; num--; } } static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts) { if (likely(pkts)) netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts); } static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res, u16 pkts) { if (xdp_res & NETSEC_XDP_REDIR) xdp_do_flush_map(); if (xdp_res & NETSEC_XDP_TX) netsec_xdp_ring_tx_db(priv, pkts); } static void netsec_set_tx_de(struct netsec_priv *priv, struct netsec_desc_ring *dring, const struct netsec_tx_pkt_ctrl *tx_ctrl, const struct netsec_desc *desc, void *buf) { int idx = dring->head; struct netsec_de *de; u32 attr; de = dring->vaddr + (DESC_SZ * idx); attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) | (1 << NETSEC_TX_SHIFT_PT_FIELD) | (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) | (1 << NETSEC_TX_SHIFT_FS_FIELD) | (1 << NETSEC_TX_LAST) | (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) | (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) | (1 << NETSEC_TX_SHIFT_TRS_FIELD); if (idx == DESC_NUM - 1) attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD); de->data_buf_addr_up = upper_32_bits(desc->dma_addr); de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len; de->attr = attr; dring->desc[idx] = *desc; if (desc->buf_type == TYPE_NETSEC_SKB) dring->desc[idx].skb = buf; else if (desc->buf_type == TYPE_NETSEC_XDP_TX || desc->buf_type == TYPE_NETSEC_XDP_NDO) dring->desc[idx].xdpf = buf; /* move head ahead */ dring->head = (dring->head + 1) % DESC_NUM; } /* The current driver only supports 1 Txq, this should run under spin_lock() */ static u32 netsec_xdp_queue_one(struct netsec_priv *priv, struct xdp_frame *xdpf, bool is_ndo) { struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; struct page *page = virt_to_page(xdpf->data); struct netsec_tx_pkt_ctrl tx_ctrl = {}; struct netsec_desc tx_desc; dma_addr_t dma_handle; u16 filled; if (tx_ring->head >= tx_ring->tail) filled = tx_ring->head - tx_ring->tail; else filled = tx_ring->head + DESC_NUM - tx_ring->tail; if (DESC_NUM - filled <= 1) return NETSEC_XDP_CONSUMED; if (is_ndo) { /* this is for ndo_xdp_xmit, the buffer needs mapping before * sending */ dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); if (dma_mapping_error(priv->dev, dma_handle)) return NETSEC_XDP_CONSUMED; tx_desc.buf_type = TYPE_NETSEC_XDP_NDO; } else { /* This is the device Rx buffer from page_pool. 
No need to remap * just sync and send it */ struct netsec_desc_ring *rx_ring = &priv->desc_ring[NETSEC_RING_RX]; enum dma_data_direction dma_dir = page_pool_get_dma_dir(rx_ring->page_pool); dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom + sizeof(*xdpf); dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len, dma_dir); tx_desc.buf_type = TYPE_NETSEC_XDP_TX; } tx_desc.dma_addr = dma_handle; tx_desc.addr = xdpf->data; tx_desc.len = xdpf->len; netdev_sent_queue(priv->ndev, xdpf->len); netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf); return NETSEC_XDP_TX; } static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp) { struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); u32 ret; if (unlikely(!xdpf)) return NETSEC_XDP_CONSUMED; spin_lock(&tx_ring->lock); ret = netsec_xdp_queue_one(priv, xdpf, false); spin_unlock(&tx_ring->lock); return ret; } static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog, struct xdp_buff *xdp) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; unsigned int sync, len = xdp->data_end - xdp->data; u32 ret = NETSEC_XDP_PASS; struct page *page; int err; u32 act; act = bpf_prog_run_xdp(prog, xdp); /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM; sync = max(sync, len); switch (act) { case XDP_PASS: ret = NETSEC_XDP_PASS; break; case XDP_TX: ret = netsec_xdp_xmit_back(priv, xdp); if (ret != NETSEC_XDP_TX) { page = virt_to_head_page(xdp->data); page_pool_put_page(dring->page_pool, page, sync, true); } break; case XDP_REDIRECT: err = xdp_do_redirect(priv->ndev, xdp, prog); if (!err) { ret = NETSEC_XDP_REDIR; } else { ret = NETSEC_XDP_CONSUMED; page = virt_to_head_page(xdp->data); page_pool_put_page(dring->page_pool, page, sync, true); } break; default: bpf_warn_invalid_xdp_action(priv->ndev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(priv->ndev, prog, act); fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: ret = NETSEC_XDP_CONSUMED; page = virt_to_head_page(xdp->data); page_pool_put_page(dring->page_pool, page, sync, true); break; } return ret; } static int netsec_process_rx(struct netsec_priv *priv, int budget) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; struct net_device *ndev = priv->ndev; struct netsec_rx_pkt_info rx_info; enum dma_data_direction dma_dir; struct bpf_prog *xdp_prog; struct xdp_buff xdp; u16 xdp_xmit = 0; u32 xdp_act = 0; int done = 0; xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq); xdp_prog = READ_ONCE(priv->xdp_prog); dma_dir = page_pool_get_dma_dir(dring->page_pool); while (done < budget) { u16 idx = dring->tail; struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); struct netsec_desc *desc = &dring->desc[idx]; struct page *page = virt_to_page(desc->addr); u32 xdp_result = NETSEC_XDP_PASS; struct sk_buff *skb = NULL; u16 pkt_len, desc_len; dma_addr_t dma_handle; void *buf_addr; if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { /* reading the register clears the irq */ netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); break; } /* This barrier is needed to keep us from reading * any other fields out of the netsec_de until we have * verified the descriptor has been written back */ dma_rmb(); done++; pkt_len = de->buf_len_info >> 16; rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) & NETSEC_RX_PKT_ERR_MASK; rx_info.err_flag = (de->attr >> 
NETSEC_RX_PKT_ER_FIELD) & 1; if (rx_info.err_flag) { netif_err(priv, drv, priv->ndev, "%s: rx fail err(%d)\n", __func__, rx_info.err_code); ndev->stats.rx_dropped++; dring->tail = (dring->tail + 1) % DESC_NUM; /* reuse buffer page frag */ netsec_rx_fill(priv, idx, 1); continue; } rx_info.rx_cksum_result = (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3; /* allocate a fresh buffer and map it to the hardware. * This will eventually replace the old buffer in the hardware */ buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len); if (unlikely(!buf_addr)) break; dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len, dma_dir); prefetch(desc->addr); xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM, pkt_len, false); if (xdp_prog) { xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp); if (xdp_result != NETSEC_XDP_PASS) { xdp_act |= xdp_result; if (xdp_result == NETSEC_XDP_TX) xdp_xmit++; goto next; } } skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA); if (unlikely(!skb)) { /* If skb fails recycle_direct will either unmap and * free the page or refill the cache depending on the * cache state. Since we paid the allocation cost if * building an skb fails try to put the page into cache */ page_pool_put_page(dring->page_pool, page, pkt_len, true); netif_err(priv, drv, priv->ndev, "rx failed to build skb\n"); break; } skb_mark_for_recycle(skb); skb_reserve(skb, xdp.data - xdp.data_hard_start); skb_put(skb, xdp.data_end - xdp.data); skb->protocol = eth_type_trans(skb, priv->ndev); if (priv->rx_cksum_offload_flag && rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK) skb->ip_summed = CHECKSUM_UNNECESSARY; next: if (skb) napi_gro_receive(&priv->napi, skb); if (skb || xdp_result) { ndev->stats.rx_packets++; ndev->stats.rx_bytes += xdp.data_end - xdp.data; } /* Update the descriptor with fresh buffers */ desc->len = desc_len; desc->dma_addr = dma_handle; desc->addr = buf_addr; netsec_rx_fill(priv, idx, 1); dring->tail = (dring->tail + 1) % DESC_NUM; } netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit); return done; } static int netsec_napi_poll(struct napi_struct *napi, int budget) { struct netsec_priv *priv; int done; priv = container_of(napi, struct netsec_priv, napi); netsec_process_tx(priv); done = netsec_process_rx(priv, budget); if (done < budget && napi_complete_done(napi, done)) { unsigned long flags; spin_lock_irqsave(&priv->reglock, flags); netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX); spin_unlock_irqrestore(&priv->reglock, flags); } return done; } static int netsec_desc_used(struct netsec_desc_ring *dring) { int used; if (dring->head >= dring->tail) used = dring->head - dring->tail; else used = dring->head + DESC_NUM - dring->tail; return used; } static int netsec_check_stop_tx(struct netsec_priv *priv, int used) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; /* keep tail from touching the queue */ if (DESC_NUM - used < 2) { netif_stop_queue(priv->ndev); /* Make sure we read the updated value in case * descriptors got freed */ smp_rmb(); used = netsec_desc_used(dring); if (DESC_NUM - used < 2) return NETDEV_TX_BUSY; netif_wake_queue(priv->ndev); } return 0; } static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netsec_priv *priv = netdev_priv(ndev); struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; struct netsec_tx_pkt_ctrl tx_ctrl = {}; struct netsec_desc tx_desc; u16 tso_seg_len = 0; int filled; spin_lock_bh(&dring->lock); filled = netsec_desc_used(dring); if 
(netsec_check_stop_tx(priv, filled)) { spin_unlock_bh(&dring->lock); net_warn_ratelimited("%s %s Tx queue full\n", dev_name(priv->dev), ndev->name); return NETDEV_TX_BUSY; } if (skb->ip_summed == CHECKSUM_PARTIAL) tx_ctrl.cksum_offload_flag = true; if (skb_is_gso(skb)) tso_seg_len = skb_shinfo(skb)->gso_size; if (tso_seg_len > 0) { if (skb->protocol == htons(ETH_P_IP)) { ip_hdr(skb)->tot_len = 0; tcp_hdr(skb)->check = ~tcp_v4_check(0, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0); } else { tcp_v6_gso_csum_prep(skb); } tx_ctrl.tcp_seg_offload_flag = true; tx_ctrl.tcp_seg_len = tso_seg_len; } tx_desc.dma_addr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) { spin_unlock_bh(&dring->lock); netif_err(priv, drv, priv->ndev, "%s: DMA mapping failed\n", __func__); ndev->stats.tx_dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } tx_desc.addr = skb->data; tx_desc.len = skb_headlen(skb); tx_desc.buf_type = TYPE_NETSEC_SKB; skb_tx_timestamp(skb); netdev_sent_queue(priv->ndev, skb->len); netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb); spin_unlock_bh(&dring->lock); netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */ return NETDEV_TX_OK; } static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) { struct netsec_desc_ring *dring = &priv->desc_ring[id]; struct netsec_desc *desc; u16 idx; if (!dring->vaddr || !dring->desc) return; for (idx = 0; idx < DESC_NUM; idx++) { desc = &dring->desc[idx]; if (!desc->addr) continue; if (id == NETSEC_RING_RX) { struct page *page = virt_to_page(desc->addr); page_pool_put_full_page(dring->page_pool, page, false); } else if (id == NETSEC_RING_TX) { dma_unmap_single(priv->dev, desc->dma_addr, desc->len, DMA_TO_DEVICE); dev_kfree_skb(desc->skb); } } /* Rx is currently using page_pool */ if (id == NETSEC_RING_RX) { if (xdp_rxq_info_is_reg(&dring->xdp_rxq)) xdp_rxq_info_unreg(&dring->xdp_rxq); page_pool_destroy(dring->page_pool); } memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM); memset(dring->vaddr, 0, DESC_SZ * DESC_NUM); dring->head = 0; dring->tail = 0; if (id == NETSEC_RING_TX) netdev_reset_queue(priv->ndev); } static void netsec_free_dring(struct netsec_priv *priv, int id) { struct netsec_desc_ring *dring = &priv->desc_ring[id]; if (dring->vaddr) { dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM, dring->vaddr, dring->desc_dma); dring->vaddr = NULL; } kfree(dring->desc); dring->desc = NULL; } static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id) { struct netsec_desc_ring *dring = &priv->desc_ring[id]; dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM, &dring->desc_dma, GFP_KERNEL); if (!dring->vaddr) goto err; dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL); if (!dring->desc) goto err; return 0; err: netsec_free_dring(priv, id); return -ENOMEM; } static void netsec_setup_tx_dring(struct netsec_priv *priv) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; int i; for (i = 0; i < DESC_NUM; i++) { struct netsec_de *de; de = dring->vaddr + (DESC_SZ * i); /* de->attr is not going to be accessed by the NIC * until netsec_set_tx_de() is called. 
* No need for a dma_wmb() here */ de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; } } static int netsec_setup_rx_dring(struct netsec_priv *priv) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog); struct page_pool_params pp_params = { .order = 0, /* internal DMA mapping in page_pool */ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, .pool_size = DESC_NUM, .nid = NUMA_NO_NODE, .dev = priv->dev, .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, .offset = NETSEC_RXBUF_HEADROOM, .max_len = NETSEC_RX_BUF_SIZE, }; int i, err; dring->page_pool = page_pool_create(&pp_params); if (IS_ERR(dring->page_pool)) { err = PTR_ERR(dring->page_pool); dring->page_pool = NULL; goto err_out; } err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id); if (err) goto err_out; err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL, dring->page_pool); if (err) goto err_out; for (i = 0; i < DESC_NUM; i++) { struct netsec_desc *desc = &dring->desc[i]; dma_addr_t dma_handle; void *buf; u16 len; buf = netsec_alloc_rx_data(priv, &dma_handle, &len); if (!buf) { err = -ENOMEM; goto err_out; } desc->dma_addr = dma_handle; desc->addr = buf; desc->len = len; } netsec_rx_fill(priv, 0, DESC_NUM); return 0; err_out: netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); return err; } static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg, u32 addr_h, u32 addr_l, u32 size) { u64 base = (u64)addr_h << 32 | addr_l; void __iomem *ucode; u32 i; ucode = ioremap(base, size * sizeof(u32)); if (!ucode) return -ENOMEM; for (i = 0; i < size; i++) netsec_write(priv, reg, readl(ucode + i * 4)); iounmap(ucode); return 0; } static int netsec_netdev_load_microcode(struct netsec_priv *priv) { u32 addr_h, addr_l, size; int err; addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H); addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L); size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE); err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF, addr_h, addr_l, size); if (err) return err; addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H); addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L); size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE); err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF, addr_h, addr_l, size); if (err) return err; addr_h = 0; addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS); size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE); err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF, addr_h, addr_l, size); if (err) return err; return 0; } static int netsec_reset_hardware(struct netsec_priv *priv, bool load_ucode) { u32 value; int err; /* stop DMA engines */ if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) { netsec_write(priv, NETSEC_REG_DMA_HM_CTRL, NETSEC_DMA_CTRL_REG_STOP); netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, NETSEC_DMA_CTRL_REG_STOP); while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) & NETSEC_DMA_CTRL_REG_STOP) cpu_relax(); while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) & NETSEC_DMA_CTRL_REG_STOP) cpu_relax(); } netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET); netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN); netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL); while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0) cpu_relax(); /* set desc_start addr */ netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP, 
upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW, lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma)); netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP, upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW, lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma)); /* set normal tx dring ring config */ netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG, 1 << NETSEC_REG_DESC_ENDIAN); netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG, 1 << NETSEC_REG_DESC_ENDIAN); if (load_ucode) { err = netsec_netdev_load_microcode(priv); if (err) { netif_err(priv, probe, priv->ndev, "%s: failed to load microcode (%d)\n", __func__, err); return err; } } /* start DMA engines */ netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1); netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0); usleep_range(1000, 2000); if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) & NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) { netif_err(priv, probe, priv->ndev, "microengine start failed\n"); return -ENXIO; } netsec_write(priv, NETSEC_REG_TOP_STATUS, NETSEC_TOP_IRQ_REG_CODE_LOAD_END); value = NETSEC_PKT_CTRL_REG_MODE_NRM; if (priv->ndev->mtu > ETH_DATA_LEN) value |= NETSEC_PKT_CTRL_REG_EN_JUMBO; /* change to normal mode */ netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS); netsec_write(priv, NETSEC_REG_PKT_CTRL, value); while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) & NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0) cpu_relax(); /* clear any pending EMPTY/ERR irq status */ netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0); /* Disable TX & RX intr */ netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); return 0; } static int netsec_start_gmac(struct netsec_priv *priv) { struct phy_device *phydev = priv->ndev->phydev; u32 value = 0; int ret; if (phydev->speed != SPEED_1000) value = (NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON); if (netsec_mac_write(priv, GMAC_REG_MCR, value)) return -ETIMEDOUT; if (netsec_mac_write(priv, GMAC_REG_BMR, NETSEC_GMAC_BMR_REG_RESET)) return -ETIMEDOUT; /* Wait soft reset */ usleep_range(1000, 5000); ret = netsec_mac_read(priv, GMAC_REG_BMR, &value); if (ret) return ret; if (value & NETSEC_GMAC_BMR_REG_SWR) return -EAGAIN; netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1); if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1)) return -ETIMEDOUT; netsec_write(priv, MAC_REG_DESC_INIT, 1); if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1)) return -ETIMEDOUT; if (netsec_mac_write(priv, GMAC_REG_BMR, NETSEC_GMAC_BMR_REG_COMMON)) return -ETIMEDOUT; if (netsec_mac_write(priv, GMAC_REG_RDLAR, NETSEC_GMAC_RDLAR_REG_COMMON)) return -ETIMEDOUT; if (netsec_mac_write(priv, GMAC_REG_TDLAR, NETSEC_GMAC_TDLAR_REG_COMMON)) return -ETIMEDOUT; if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001)) return -ETIMEDOUT; ret = netsec_mac_update_to_phy_state(priv); if (ret) return ret; ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); if (ret) return ret; value |= NETSEC_GMAC_OMR_REG_SR; value |= NETSEC_GMAC_OMR_REG_ST; netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce, NULL, NULL); if (netsec_mac_write(priv, GMAC_REG_OMR, value)) return -ETIMEDOUT; return 0; } static int netsec_stop_gmac(struct netsec_priv *priv) { u32 value; int ret; ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); if (ret) return ret; value &= ~NETSEC_GMAC_OMR_REG_SR; value &= ~NETSEC_GMAC_OMR_REG_ST; /* 
disable all interrupts */ netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); return netsec_mac_write(priv, GMAC_REG_OMR, value); } static void netsec_phy_adjust_link(struct net_device *ndev) { struct netsec_priv *priv = netdev_priv(ndev); if (ndev->phydev->link) netsec_start_gmac(priv); else netsec_stop_gmac(priv); phy_print_status(ndev->phydev); } static irqreturn_t netsec_irq_handler(int irq, void *dev_id) { struct netsec_priv *priv = dev_id; u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS); unsigned long flags; /* Disable interrupts */ if (status & NETSEC_IRQ_TX) { val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS); netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val); } if (status & NETSEC_IRQ_RX) { val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS); netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val); } spin_lock_irqsave(&priv->reglock, flags); netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX); spin_unlock_irqrestore(&priv->reglock, flags); napi_schedule(&priv->napi); return IRQ_HANDLED; } static int netsec_netdev_open(struct net_device *ndev) { struct netsec_priv *priv = netdev_priv(ndev); int ret; pm_runtime_get_sync(priv->dev); netsec_setup_tx_dring(priv); ret = netsec_setup_rx_dring(priv); if (ret) { netif_err(priv, probe, priv->ndev, "%s: fail setup ring\n", __func__); goto err1; } ret = request_irq(priv->ndev->irq, netsec_irq_handler, IRQF_SHARED, "netsec", priv); if (ret) { netif_err(priv, drv, priv->ndev, "request_irq failed\n"); goto err2; } if (dev_of_node(priv->dev)) { if (!of_phy_connect(priv->ndev, priv->phy_np, netsec_phy_adjust_link, 0, priv->phy_interface)) { netif_err(priv, link, priv->ndev, "missing PHY\n"); ret = -ENODEV; goto err3; } } else { ret = phy_connect_direct(priv->ndev, priv->phydev, netsec_phy_adjust_link, priv->phy_interface); if (ret) { netif_err(priv, link, priv->ndev, "phy_connect_direct() failed (%d)\n", ret); goto err3; } } phy_start(ndev->phydev); netsec_start_gmac(priv); napi_enable(&priv->napi); netif_start_queue(ndev); /* Enable TX+RX intr. 
*/ netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX); return 0; err3: free_irq(priv->ndev->irq, priv); err2: netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); err1: pm_runtime_put_sync(priv->dev); return ret; } static int netsec_netdev_stop(struct net_device *ndev) { int ret; struct netsec_priv *priv = netdev_priv(ndev); netif_stop_queue(priv->ndev); dma_wmb(); napi_disable(&priv->napi); netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); netsec_stop_gmac(priv); free_irq(priv->ndev->irq, priv); netsec_uninit_pkt_dring(priv, NETSEC_RING_TX); netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); phy_stop(ndev->phydev); phy_disconnect(ndev->phydev); ret = netsec_reset_hardware(priv, false); pm_runtime_put_sync(priv->dev); return ret; } static int netsec_netdev_init(struct net_device *ndev) { struct netsec_priv *priv = netdev_priv(ndev); int ret; u16 data; BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM); ret = netsec_alloc_dring(priv, NETSEC_RING_TX); if (ret) return ret; ret = netsec_alloc_dring(priv, NETSEC_RING_RX); if (ret) goto err1; /* set phy power down */ data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR); netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data | BMCR_PDOWN); ret = netsec_reset_hardware(priv, true); if (ret) goto err2; /* Restore phy power state */ netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock); spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock); return 0; err2: netsec_free_dring(priv, NETSEC_RING_RX); err1: netsec_free_dring(priv, NETSEC_RING_TX); return ret; } static void netsec_netdev_uninit(struct net_device *ndev) { struct netsec_priv *priv = netdev_priv(ndev); netsec_free_dring(priv, NETSEC_RING_RX); netsec_free_dring(priv, NETSEC_RING_TX); } static int netsec_netdev_set_features(struct net_device *ndev, netdev_features_t features) { struct netsec_priv *priv = netdev_priv(ndev); priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM); return 0; } static int netsec_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames, u32 flags) { struct netsec_priv *priv = netdev_priv(ndev); struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX]; int i, nxmit = 0; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; spin_lock(&tx_ring->lock); for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; err = netsec_xdp_queue_one(priv, xdpf, true); if (err != NETSEC_XDP_TX) break; tx_ring->xdp_xmit++; nxmit++; } spin_unlock(&tx_ring->lock); if (unlikely(flags & XDP_XMIT_FLUSH)) { netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit); tx_ring->xdp_xmit = 0; } return nxmit; } static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct net_device *dev = priv->ndev; struct bpf_prog *old_prog; /* For now just support only the usual MTU sized frames */ if (prog && dev->mtu > 1500) { NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP"); return -EOPNOTSUPP; } if (netif_running(dev)) netsec_netdev_stop(dev); /* Detach old prog, if any */ old_prog = xchg(&priv->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); if (netif_running(dev)) netsec_netdev_open(dev); return 0; } static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp) { struct netsec_priv *priv = netdev_priv(ndev); switch (xdp->command) { case XDP_SETUP_PROG: return netsec_xdp_setup(priv, xdp->prog, xdp->extack); default: return -EINVAL; } } static const struct net_device_ops netsec_netdev_ops = { .ndo_init = 
netsec_netdev_init, .ndo_uninit = netsec_netdev_uninit, .ndo_open = netsec_netdev_open, .ndo_stop = netsec_netdev_stop, .ndo_start_xmit = netsec_netdev_start_xmit, .ndo_set_features = netsec_netdev_set_features, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = phy_do_ioctl, .ndo_xdp_xmit = netsec_xdp_xmit, .ndo_bpf = netsec_xdp, }; static int netsec_of_probe(struct platform_device *pdev, struct netsec_priv *priv, u32 *phy_addr) { int err; err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_interface); if (err) { dev_err(&pdev->dev, "missing required property 'phy-mode'\n"); return err; } /* * SynQuacer is physically configured with TX and RX delays * but the standard firmware claimed otherwise for a long * time, ignore it. */ if (of_machine_is_compatible("socionext,developer-box") && priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) { dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n"); priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; } priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!priv->phy_np) { dev_err(&pdev->dev, "missing required property 'phy-handle'\n"); return -EINVAL; } *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np); priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ if (IS_ERR(priv->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), "phy_ref_clk not found\n"); priv->freq = clk_get_rate(priv->clk); return 0; } static int netsec_acpi_probe(struct platform_device *pdev, struct netsec_priv *priv, u32 *phy_addr) { int ret; if (!IS_ENABLED(CONFIG_ACPI)) return -ENODEV; /* ACPI systems are assumed to configure the PHY in firmware, so * there is really no need to discover the PHY mode from the DSDT. * Since firmware is known to exist in the field that configures the * PHY correctly but passes the wrong mode string in the phy-mode * device property, we have no choice but to ignore it. */ priv->phy_interface = PHY_INTERFACE_MODE_NA; ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr); if (ret) return dev_err_probe(&pdev->dev, ret, "missing required property 'phy-channel'\n"); ret = device_property_read_u32(&pdev->dev, "socionext,phy-clock-frequency", &priv->freq); if (ret) return dev_err_probe(&pdev->dev, ret, "missing required property 'socionext,phy-clock-frequency'\n"); return 0; } static void netsec_unregister_mdio(struct netsec_priv *priv) { struct phy_device *phydev = priv->phydev; if (!dev_of_node(priv->dev) && phydev) { phy_device_remove(phydev); phy_device_free(phydev); } mdiobus_unregister(priv->mii_bus); } static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) { struct mii_bus *bus; int ret; bus = devm_mdiobus_alloc(priv->dev); if (!bus) return -ENOMEM; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev)); bus->priv = priv; bus->name = "SNI NETSEC MDIO"; bus->read = netsec_phy_read; bus->write = netsec_phy_write; bus->parent = priv->dev; priv->mii_bus = bus; if (dev_of_node(priv->dev)) { struct device_node *mdio_node, *parent = dev_of_node(priv->dev); mdio_node = of_get_child_by_name(parent, "mdio"); if (mdio_node) { parent = mdio_node; } else { /* older f/w doesn't populate the mdio subnode, * allow relaxed upgrade of f/w in due time. */ dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n"); } ret = of_mdiobus_register(bus, parent); of_node_put(mdio_node); if (ret) { dev_err(priv->dev, "mdiobus register err(%d)\n", ret); return ret; } } else { /* Mask out all PHYs from auto probing. 
*/ bus->phy_mask = ~0; ret = mdiobus_register(bus); if (ret) { dev_err(priv->dev, "mdiobus register err(%d)\n", ret); return ret; } priv->phydev = get_phy_device(bus, phy_addr, false); if (IS_ERR(priv->phydev)) { ret = PTR_ERR(priv->phydev); dev_err(priv->dev, "get_phy_device err(%d)\n", ret); priv->phydev = NULL; mdiobus_unregister(bus); return -ENODEV; } ret = phy_device_register(priv->phydev); if (ret) { phy_device_free(priv->phydev); mdiobus_unregister(bus); dev_err(priv->dev, "phy_device_register err(%d)\n", ret); } } return ret; } static int netsec_probe(struct platform_device *pdev) { struct resource *mmio_res, *eeprom_res; struct netsec_priv *priv; u32 hw_ver, phy_addr = 0; struct net_device *ndev; int ret; int irq; mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mmio_res) { dev_err(&pdev->dev, "No MMIO resource found.\n"); return -ENODEV; } eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!eeprom_res) { dev_info(&pdev->dev, "No EEPROM resource found.\n"); return -ENODEV; } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ndev = alloc_etherdev(sizeof(*priv)); if (!ndev) return -ENOMEM; priv = netdev_priv(ndev); spin_lock_init(&priv->reglock); SET_NETDEV_DEV(ndev, &pdev->dev); platform_set_drvdata(pdev, priv); ndev->irq = irq; priv->dev = &pdev->dev; priv->ndev = ndev; priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | NETIF_MSG_PROBE; priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start, resource_size(mmio_res)); if (!priv->ioaddr) { dev_err(&pdev->dev, "devm_ioremap() failed\n"); ret = -ENXIO; goto free_ndev; } priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start, resource_size(eeprom_res)); if (!priv->eeprom_base) { dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n"); ret = -ENXIO; goto free_ndev; } ret = device_get_ethdev_address(&pdev->dev, ndev); if (ret && priv->eeprom_base) { void __iomem *macp = priv->eeprom_base + NETSEC_EEPROM_MAC_ADDRESS; u8 addr[ETH_ALEN]; addr[0] = readb(macp + 3); addr[1] = readb(macp + 2); addr[2] = readb(macp + 1); addr[3] = readb(macp + 0); addr[4] = readb(macp + 7); addr[5] = readb(macp + 6); eth_hw_addr_set(ndev, addr); } if (!is_valid_ether_addr(ndev->dev_addr)) { dev_warn(&pdev->dev, "No MAC address found, using random\n"); eth_hw_addr_random(ndev); } if (dev_of_node(&pdev->dev)) ret = netsec_of_probe(pdev, priv, &phy_addr); else ret = netsec_acpi_probe(pdev, priv, &phy_addr); if (ret) goto free_ndev; priv->phy_addr = phy_addr; if (!priv->freq) { dev_err(&pdev->dev, "missing PHY reference clock frequency\n"); ret = -ENODEV; goto free_ndev; } /* default for throughput */ priv->et_coalesce.rx_coalesce_usecs = 500; priv->et_coalesce.rx_max_coalesced_frames = 8; priv->et_coalesce.tx_coalesce_usecs = 500; priv->et_coalesce.tx_max_coalesced_frames = 8; ret = device_property_read_u32(&pdev->dev, "max-frame-size", &ndev->max_mtu); if (ret < 0) ndev->max_mtu = ETH_DATA_LEN; /* runtime_pm coverage just for probe, open/close also cover it */ pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER); /* this driver only supports F_TAIKI style NETSEC */ if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) != NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) { ret = -ENODEV; goto pm_disable; } dev_info(&pdev->dev, "hardware revision %d.%d\n", hw_ver >> 16, hw_ver & 0xffff); netif_napi_add(ndev, &priv->napi, netsec_napi_poll); ndev->netdev_ops = &netsec_netdev_ops; ndev->ethtool_ops = &netsec_ethtool_ops; 
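/* Advertise only the offloads this driver implements: Tx checksum and
 * TCP segmentation parameters are programmed per packet in
 * netsec_netdev_start_xmit(), Rx checksum results are consumed in
 * netsec_process_rx(), and the XDP feature bits below correspond to
 * netsec_run_xdp() and netsec_xdp_xmit().
 */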
ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; ndev->hw_features = ndev->features; ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT; priv->rx_cksum_offload_flag = true; ret = netsec_register_mdio(priv, phy_addr); if (ret) goto unreg_napi; if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) dev_warn(&pdev->dev, "Failed to set DMA mask\n"); ret = register_netdev(ndev); if (ret) { netif_err(priv, probe, ndev, "register_netdev() failed\n"); goto unreg_mii; } pm_runtime_put_sync(&pdev->dev); return 0; unreg_mii: netsec_unregister_mdio(priv); unreg_napi: netif_napi_del(&priv->napi); pm_disable: pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); free_ndev: free_netdev(ndev); dev_err(&pdev->dev, "init failed\n"); return ret; } static int netsec_remove(struct platform_device *pdev) { struct netsec_priv *priv = platform_get_drvdata(pdev); unregister_netdev(priv->ndev); netsec_unregister_mdio(priv); netif_napi_del(&priv->napi); pm_runtime_disable(&pdev->dev); free_netdev(priv->ndev); return 0; } #ifdef CONFIG_PM static int netsec_runtime_suspend(struct device *dev) { struct netsec_priv *priv = dev_get_drvdata(dev); netsec_write(priv, NETSEC_REG_CLK_EN, 0); clk_disable_unprepare(priv->clk); return 0; } static int netsec_runtime_resume(struct device *dev) { struct netsec_priv *priv = dev_get_drvdata(dev); clk_prepare_enable(priv->clk); netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D | NETSEC_CLK_EN_REG_DOM_C | NETSEC_CLK_EN_REG_DOM_G); return 0; } #endif static const struct dev_pm_ops netsec_pm_ops = { SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL) }; static const struct of_device_id netsec_dt_ids[] = { { .compatible = "socionext,synquacer-netsec" }, { } }; MODULE_DEVICE_TABLE(of, netsec_dt_ids); #ifdef CONFIG_ACPI static const struct acpi_device_id netsec_acpi_ids[] = { { "SCX0001" }, { } }; MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids); #endif static struct platform_driver netsec_driver = { .probe = netsec_probe, .remove = netsec_remove, .driver = { .name = "netsec", .pm = &netsec_pm_ops, .of_match_table = netsec_dt_ids, .acpi_match_table = ACPI_PTR(netsec_acpi_ids), }, }; module_platform_driver(netsec_driver); MODULE_AUTHOR("Jassi Brar <[email protected]>"); MODULE_AUTHOR("Ard Biesheuvel <[email protected]>"); MODULE_DESCRIPTION("NETSEC Ethernet driver"); MODULE_LICENSE("GPL");
linux-master
drivers/net/ethernet/socionext/netsec.c
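The netsec driver above wires up ndo_bpf and ndo_xdp_xmit and advertises NETDEV_XDP_ACT_BASIC, NETDEV_XDP_ACT_REDIRECT and NETDEV_XDP_ACT_NDO_XMIT, but netsec_xdp_setup() rejects XDP while the MTU is above 1500. As a rough illustration of how such a driver is exercised from user space, the sketch below attaches a pre-built XDP object with libbpf; the interface name "eth0", the object file "xdp_prog.o" and the program name "xdp_pass" are placeholders rather than anything shipped with the driver, and a reasonably recent libbpf (one that provides bpf_xdp_attach()) is assumed.

/* Hypothetical user-space loader: attach an XDP program in native
 * (driver) mode, which reaches netsec_xdp_setup() through ndo_bpf.
 */
#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";	/* placeholder */
	int ifindex = if_nametoindex(ifname);
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	if (!ifindex) {
		fprintf(stderr, "unknown interface %s\n", ifname);
		return 1;
	}

	obj = bpf_object__open_file("xdp_prog.o", NULL);	/* placeholder */
	if (!obj || bpf_object__load(obj)) {
		fprintf(stderr, "failed to open/load xdp_prog.o\n");
		return 1;
	}

	prog = bpf_object__find_program_by_name(obj, "xdp_pass");	/* placeholder */
	if (!prog) {
		fprintf(stderr, "program not found in object\n");
		return 1;
	}

	err = bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			     XDP_FLAGS_DRV_MODE, NULL);
	if (err) {
		fprintf(stderr, "XDP attach failed: %d\n", err);
		return 1;
	}
	return 0;
}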
// SPDX-License-Identifier: GPL-2.0-only /* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/completion.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include "hinic_hw_if.h" #include "hinic_hw_mgmt.h" #include "hinic_hw_csr.h" #include "hinic_hw_dev.h" #include "hinic_hw_mbox.h" #define HINIC_MBOX_INT_DST_FUNC_SHIFT 0 #define HINIC_MBOX_INT_DST_AEQN_SHIFT 10 #define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 #define HINIC_MBOX_INT_STAT_DMA_SHIFT 14 /* The size of data to be sended (unit of 4 bytes) */ #define HINIC_MBOX_INT_TX_SIZE_SHIFT 20 /* SO_RO(strong order, relax order) */ #define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 #define HINIC_MBOX_INT_WB_EN_SHIFT 28 #define HINIC_MBOX_INT_DST_FUNC_MASK 0x3FF #define HINIC_MBOX_INT_DST_AEQN_MASK 0x3 #define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK 0x3 #define HINIC_MBOX_INT_STAT_DMA_MASK 0x3F #define HINIC_MBOX_INT_TX_SIZE_MASK 0x1F #define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3 #define HINIC_MBOX_INT_WB_EN_MASK 0x1 #define HINIC_MBOX_INT_SET(val, field) \ (((val) & HINIC_MBOX_INT_##field##_MASK) << \ HINIC_MBOX_INT_##field##_SHIFT) enum hinic_mbox_tx_status { TX_NOT_DONE = 1, }; #define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0 /* specifies the issue request for the message data. * 0 - Tx request is done; * 1 - Tx request is in process. */ #define HINIC_MBOX_CTRL_TX_STATUS_SHIFT 1 #define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 #define HINIC_MBOX_CTRL_TX_STATUS_MASK 0x1 #define HINIC_MBOX_CTRL_SET(val, field) \ (((val) & HINIC_MBOX_CTRL_##field##_MASK) << \ HINIC_MBOX_CTRL_##field##_SHIFT) #define HINIC_MBOX_HEADER_MSG_LEN_SHIFT 0 #define HINIC_MBOX_HEADER_MODULE_SHIFT 11 #define HINIC_MBOX_HEADER_SEG_LEN_SHIFT 16 #define HINIC_MBOX_HEADER_NO_ACK_SHIFT 22 #define HINIC_MBOX_HEADER_SEQID_SHIFT 24 #define HINIC_MBOX_HEADER_LAST_SHIFT 30 /* specifies the mailbox message direction * 0 - send * 1 - receive */ #define HINIC_MBOX_HEADER_DIRECTION_SHIFT 31 #define HINIC_MBOX_HEADER_CMD_SHIFT 32 #define HINIC_MBOX_HEADER_MSG_ID_SHIFT 40 #define HINIC_MBOX_HEADER_STATUS_SHIFT 48 #define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT 54 #define HINIC_MBOX_HEADER_MSG_LEN_MASK 0x7FF #define HINIC_MBOX_HEADER_MODULE_MASK 0x1F #define HINIC_MBOX_HEADER_SEG_LEN_MASK 0x3F #define HINIC_MBOX_HEADER_NO_ACK_MASK 0x1 #define HINIC_MBOX_HEADER_SEQID_MASK 0x3F #define HINIC_MBOX_HEADER_LAST_MASK 0x1 #define HINIC_MBOX_HEADER_DIRECTION_MASK 0x1 #define HINIC_MBOX_HEADER_CMD_MASK 0xFF #define HINIC_MBOX_HEADER_MSG_ID_MASK 0xFF #define HINIC_MBOX_HEADER_STATUS_MASK 0x3F #define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK 0x3FF #define HINIC_MBOX_HEADER_GET(val, field) \ (((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \ HINIC_MBOX_HEADER_##field##_MASK) #define HINIC_MBOX_HEADER_SET(val, field) \ ((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \ HINIC_MBOX_HEADER_##field##_SHIFT) #define MBOX_SEGLEN_MASK \ HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN) #define HINIC_MBOX_SEG_LEN 48 #define HINIC_MBOX_COMP_TIME 8000U #define MBOX_MSG_POLLING_TIMEOUT 8000 #define HINIC_MBOX_DATA_SIZE 2040 #define MBOX_MAX_BUF_SZ 2048UL #define MBOX_HEADER_SZ 8 #define MBOX_INFO_SZ 4 /* MBOX size is 64B, 8B for mbox_header, 4B reserved */ #define MBOX_SEG_LEN 48 #define MBOX_SEG_LEN_ALIGN 4 #define MBOX_WB_STATUS_LEN 16UL /* mbox write back status is 16B, only first 4B is used */ #define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF 
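/* Layout of the first 4 bytes of the mailbox write-back area: bits [7:0]
 * hold the completion status (0xFF on success, 0x00 while not finished)
 * and bits [15:8] hold the error code, as decoded by the MBOX_WB_* and
 * MBOX_STATUS_* macros below.
 */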
#define MBOX_WB_STATUS_MASK 0xFF #define MBOX_WB_ERROR_CODE_MASK 0xFF00 #define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF #define MBOX_WB_STATUS_NOT_FINISHED 0x00 #define MBOX_STATUS_FINISHED(wb) \ (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) #define MBOX_STATUS_SUCCESS(wb) \ (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) #define MBOX_STATUS_ERRCODE(wb) \ ((wb) & MBOX_WB_ERROR_CODE_MASK) #define SEQ_ID_START_VAL 0 #define SEQ_ID_MAX_VAL 42 #define NO_DMA_ATTRIBUTE_VAL 0 #define HINIC_MBOX_RSP_AEQN 2 #define HINIC_MBOX_RECV_AEQN 0 #define MBOX_MSG_NO_DATA_LEN 1 #define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) #define MBOX_AREA(hwif) \ ((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF) #define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS) #define MBOX_MSG_ID_MASK 0xFF #define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id) #define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \ (MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK) #define FUNC_ID_OFF_SET_8B 8 /* max message counter wait to process for one function */ #define HINIC_MAX_MSG_CNT_TO_PROCESS 10 #define HINIC_QUEUE_MIN_DEPTH 6 #define HINIC_QUEUE_MAX_DEPTH 12 #define HINIC_MAX_RX_BUFFER_SIZE 15 enum hinic_hwif_direction_type { HINIC_HWIF_DIRECT_SEND = 0, HINIC_HWIF_RESPONSE = 1, }; enum mbox_send_mod { MBOX_SEND_MSG_INT, }; enum mbox_seg_type { NOT_LAST_SEG, LAST_SEG, }; enum mbox_ordering_type { STRONG_ORDER, }; enum mbox_write_back_type { WRITE_BACK = 1, }; enum mbox_aeq_trig_type { NOT_TRIGGER, TRIGGER, }; static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx, const void *buf_in, u16 in_size, u16 offset) { u16 func_idx; if (in_size < offset + sizeof(func_idx)) { dev_warn(&hwdev->hwif->pdev->dev, "Receive mailbox msg len: %d less than %d Bytes is invalid\n", in_size, offset); return false; } func_idx = *((u16 *)((u8 *)buf_in + offset)); if (src_func_idx != func_idx) { dev_warn(&hwdev->hwif->pdev->dev, "Receive mailbox function id: 0x%x not equal to msg function id: 0x%x\n", src_func_idx, func_idx); return false; } return true; } bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx, void *buf_in, u16 in_size) { return check_func_id(hwdev, func_idx, buf_in, in_size, FUNC_ID_OFF_SET_8B); } /** * hinic_register_pf_mbox_cb - register mbox callback for pf * @hwdev: the pointer to hw device * @mod: specific mod that the callback will handle * @callback: callback function * Return: 0 - success, negative - failure */ int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, hinic_pf_mbox_cb callback) { struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; if (mod >= HINIC_MOD_MAX) return -EFAULT; func_to_func->pf_mbox_cb[mod] = callback; set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); return 0; } /** * hinic_register_vf_mbox_cb - register mbox callback for vf * @hwdev: the pointer to hw device * @mod: specific mod that the callback will handle * @callback: callback function * Return: 0 - success, negative - failure */ int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, hinic_vf_mbox_cb callback) { struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; if (mod >= HINIC_MOD_MAX) return -EFAULT; func_to_func->vf_mbox_cb[mod] = callback; set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); return 0; } /** * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf * 
@hwdev: the pointer to hw device * @mod: specific mod that the callback will handle */ void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev, enum hinic_mod_type mod) { struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); while (test_bit(HINIC_PF_MBOX_CB_RUNNING, &func_to_func->pf_mbox_cb_state[mod])) usleep_range(900, 1000); func_to_func->pf_mbox_cb[mod] = NULL; } /** * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf * @hwdev: the pointer to hw device * @mod: specific mod that the callback will handle */ void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev, enum hinic_mod_type mod) { struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); while (test_bit(HINIC_VF_MBOX_CB_RUNNING, &func_to_func->vf_mbox_cb_state[mod])) usleep_range(900, 1000); func_to_func->vf_mbox_cb[mod] = NULL; } static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, struct hinic_recv_mbox *recv_mbox, void *buf_out, u16 *out_size) { hinic_vf_mbox_cb cb; int ret = 0; if (recv_mbox->mod >= HINIC_MOD_MAX) { dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n", recv_mbox->mod); return -EINVAL; } set_bit(HINIC_VF_MBOX_CB_RUNNING, &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); cb = func_to_func->vf_mbox_cb[recv_mbox->mod]; if (cb && test_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) { cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox, recv_mbox->mbox_len, buf_out, out_size); } else { dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n"); ret = -EINVAL; } clear_bit(HINIC_VF_MBOX_CB_RUNNING, &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); return ret; } static int recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, struct hinic_recv_mbox *recv_mbox, u16 src_func_idx, void *buf_out, u16 *out_size) { hinic_pf_mbox_cb cb; u16 vf_id = 0; int ret; if (recv_mbox->mod >= HINIC_MOD_MAX) { dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n", recv_mbox->mod); return -EINVAL; } set_bit(HINIC_PF_MBOX_CB_RUNNING, &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); cb = func_to_func->pf_mbox_cb[recv_mbox->mod]; if (cb && test_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) { vf_id = src_func_idx - hinic_glb_pf_vf_offset(func_to_func->hwif); ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd, recv_mbox->mbox, recv_mbox->mbox_len, buf_out, out_size); } else { dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n", recv_mbox->mod); ret = -EINVAL; } clear_bit(HINIC_PF_MBOX_CB_RUNNING, &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); return ret; } static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox, u8 seq_id, u8 seg_len) { if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN) return false; if (seq_id == 0) { recv_mbox->seq_id = seq_id; } else { if (seq_id != recv_mbox->seq_id + 1) return false; recv_mbox->seq_id = seq_id; } return true; } static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, struct hinic_recv_mbox *recv_mbox) { spin_lock(&func_to_func->mbox_lock); if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id && func_to_func->event_flag == EVENT_START) complete(&recv_mbox->recv_done); else dev_err(&func_to_func->hwif->pdev->dev, "Mbox response timeout, current send msg 
id(0x%x), recv msg id(0x%x), status(0x%x)\n", func_to_func->send_msg_id, recv_mbox->msg_info.msg_id, recv_mbox->msg_info.status); spin_unlock(&func_to_func->mbox_lock); } static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, struct hinic_recv_mbox *recv_mbox, u16 src_func_idx); static void recv_func_mbox_work_handler(struct work_struct *work) { struct hinic_mbox_work *mbox_work = container_of(work, struct hinic_mbox_work, work); struct hinic_recv_mbox *recv_mbox; recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox, mbox_work->src_func_idx); recv_mbox = &mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx]; atomic_dec(&recv_mbox->msg_cnt); kfree(mbox_work); } static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, void *header, struct hinic_recv_mbox *recv_mbox) { void *mbox_body = MBOX_BODY_FROM_HDR(header); struct hinic_recv_mbox *rcv_mbox_temp = NULL; u64 mbox_header = *((u64 *)header); struct hinic_mbox_work *mbox_work; u8 seq_id, seg_len; u16 src_func_idx; int pos; seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID); seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN); src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) { dev_err(&func_to_func->hwif->pdev->dev, "Mailbox sequence and segment check fail, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n", src_func_idx, recv_mbox->seq_id, seq_id, seg_len); recv_mbox->seq_id = SEQ_ID_MAX_VAL; return; } pos = seq_id * MBOX_SEG_LEN; memcpy((u8 *)recv_mbox->mbox + pos, mbox_body, HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN)); if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST)) return; recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD); recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE); recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN); recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK); recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID); recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS); recv_mbox->seq_id = SEQ_ID_MAX_VAL; if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) == HINIC_HWIF_RESPONSE) { resp_mbox_handler(func_to_func, recv_mbox); return; } if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) { dev_warn(&func_to_func->hwif->pdev->dev, "This function(%u) have %d message wait to process,can't add to work queue\n", src_func_idx, atomic_read(&recv_mbox->msg_cnt)); return; } rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL); if (!rcv_mbox_temp) return; rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ, GFP_KERNEL); if (!rcv_mbox_temp->mbox) goto err_alloc_rcv_mbox_msg; rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); if (!rcv_mbox_temp->buf_out) goto err_alloc_rcv_mbox_buf; mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); if (!mbox_work) goto err_alloc_mbox_work; mbox_work->func_to_func = func_to_func; mbox_work->recv_mbox = rcv_mbox_temp; mbox_work->src_func_idx = src_func_idx; atomic_inc(&recv_mbox->msg_cnt); INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler); queue_work(func_to_func->workq, &mbox_work->work); return; err_alloc_mbox_work: kfree(rcv_mbox_temp->buf_out); err_alloc_rcv_mbox_buf: kfree(rcv_mbox_temp->mbox); err_alloc_rcv_mbox_msg: kfree(rcv_mbox_temp); } static int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id) { struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; 
struct hinic_set_random_id rand_info = {0}; u16 out_size = sizeof(rand_info); struct hinic_pfhwdev *pfhwdev; int ret; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); rand_info.version = HINIC_CMD_VER_FUNC_ID; rand_info.func_idx = func_id; rand_info.vf_in_pf = func_id - hinic_glb_pf_vf_offset(hwdev->hwif); rand_info.random_id = get_random_u32(); func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id; ret = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_MGMT_CMD_SET_VF_RANDOM_ID, &rand_info, sizeof(rand_info), &rand_info, &out_size, HINIC_MGMT_MSG_SYNC); if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED && rand_info.status) || !out_size || ret) { dev_err(&hwdev->hwif->pdev->dev, "Set VF random id failed, err: %d, status: 0x%x, out size: 0x%x\n", ret, rand_info.status, out_size); return -EIO; } if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED) return rand_info.status; func_to_func->vf_mbx_old_rand_id[func_id] = func_to_func->vf_mbx_rand_id[func_id]; return 0; } static void update_random_id_work_handler(struct work_struct *work) { struct hinic_mbox_work *mbox_work = container_of(work, struct hinic_mbox_work, work); struct hinic_mbox_func_to_func *func_to_func; u16 src = mbox_work->src_func_idx; func_to_func = mbox_work->func_to_func; if (set_vf_mbox_random_id(func_to_func->hwdev, src)) dev_warn(&func_to_func->hwdev->hwif->pdev->dev, "Update VF id: 0x%x random id failed\n", mbox_work->src_func_idx); kfree(mbox_work); } static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func, u8 *header) { struct hinic_hwdev *hwdev = func_to_func->hwdev; struct hinic_mbox_work *mbox_work = NULL; u64 mbox_header = *((u64 *)header); u16 offset, src; u32 random_id; int vf_in_pf; src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random) return true; if (!HINIC_IS_PPF(hwdev->hwif)) { offset = hinic_glb_pf_vf_offset(hwdev->hwif); vf_in_pf = src - offset; if (vf_in_pf < 1 || vf_in_pf > hwdev->nic_cap.max_vf) { dev_warn(&hwdev->hwif->pdev->dev, "Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n", src, offset + 1, hwdev->nic_cap.max_vf + offset); return false; } } random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN + MBOX_HEADER_SZ)); if (random_id == func_to_func->vf_mbx_rand_id[src] || random_id == func_to_func->vf_mbx_old_rand_id[src]) return true; dev_warn(&hwdev->hwif->pdev->dev, "The mailbox random id(0x%x) of func_id(0x%x) doesn't match with pf reservation(0x%x)\n", random_id, src, func_to_func->vf_mbx_rand_id[src]); mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); if (!mbox_work) return false; mbox_work->func_to_func = func_to_func; mbox_work->src_func_idx = src; INIT_WORK(&mbox_work->work, update_random_id_work_handler); queue_work(func_to_func->workq, &mbox_work->work); return false; } static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size) { struct hinic_mbox_func_to_func *func_to_func; u64 mbox_header = *((u64 *)header); struct hinic_recv_mbox *recv_mbox; u64 src, dir; func_to_func = ((struct hinic_hwdev *)handle)->func_to_func; dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION); src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); if (src >= HINIC_MAX_FUNCTIONS) { dev_err(&func_to_func->hwif->pdev->dev, "Mailbox source function id:%u is invalid\n", (u32)src); return; } if (!check_vf_mbox_random_id(func_to_func, header)) return; recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ? 
&func_to_func->mbox_send[src] : &func_to_func->mbox_resp[src]; recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox); } static void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size) { struct hinic_mbox_func_to_func *func_to_func; struct hinic_send_mbox *send_mbox; func_to_func = ((struct hinic_hwdev *)handle)->func_to_func; send_mbox = &func_to_func->send_mbox; complete(&send_mbox->send_done); } static void clear_mbox_status(struct hinic_send_mbox *mbox) { *mbox->wb_status = 0; /* clear mailbox write back status */ wmb(); } static void mbox_copy_header(struct hinic_hwdev *hwdev, struct hinic_send_mbox *mbox, u64 *header) { u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); u32 *data = (u32 *)header; for (i = 0; i < idx_max; i++) __raw_writel(*(data + i), mbox->data + i * sizeof(u32)); } static void mbox_copy_send_data(struct hinic_hwdev *hwdev, struct hinic_send_mbox *mbox, void *seg, u16 seg_len) { u8 mbox_max_buf[MBOX_SEG_LEN] = {0}; u32 data_len, chk_sz = sizeof(u32); u32 *data = seg; u32 i, idx_max; /* The mbox message should be aligned in 4 bytes. */ if (seg_len % chk_sz) { memcpy(mbox_max_buf, seg, seg_len); data = (u32 *)mbox_max_buf; } data_len = seg_len; idx_max = ALIGN(data_len, chk_sz) / chk_sz; for (i = 0; i < idx_max; i++) __raw_writel(*(data + i), mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); } static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func, u16 dst_func, u16 dst_aeqn, u16 seg_len, int poll) { u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN; u32 mbox_int, mbox_ctrl; mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) | HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) | HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) | HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ + MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2, TX_SIZE) | HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN); hinic_hwif_write_reg(func_to_func->hwif, HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int); wmb(); /* writing the mbox int attributes */ mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS); if (poll) mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE); else mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE); hinic_hwif_write_reg(func_to_func->hwif, HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); } static void dump_mox_reg(struct hinic_hwdev *hwdev) { u32 val; val = hinic_hwif_read_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF); dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val); val = hinic_hwif_read_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n", val); } static u16 get_mbox_status(struct hinic_send_mbox *mbox) { /* write back is 16B, but only use first 4B */ u64 wb_val = be64_to_cpu(*mbox->wb_status); rmb(); /* verify reading before check */ return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); } static int wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func, int poll, u16 *wb_status) { struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; struct hinic_hwdev *hwdev = func_to_func->hwdev; struct completion *done = &send_mbox->send_done; u32 cnt = 0; unsigned long jif; if (poll) { while (cnt < MBOX_MSG_POLLING_TIMEOUT) { *wb_status = get_mbox_status(send_mbox); if (MBOX_STATUS_FINISHED(*wb_status)) break; usleep_range(900, 1000); cnt++; } if (cnt == MBOX_MSG_POLLING_TIMEOUT) { dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment 
timeout, wb status: 0x%x\n", *wb_status); dump_mox_reg(hwdev); return -ETIMEDOUT; } } else { jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME); if (!wait_for_completion_timeout(done, jif)) { dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n"); dump_mox_reg(hwdev); hinic_dump_aeq_info(hwdev); return -ETIMEDOUT; } *wb_status = get_mbox_status(send_mbox); } return 0; } static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func, u64 header, u16 dst_func, void *seg, u16 seg_len, int poll, void *msg_info) { struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION); struct hinic_hwdev *hwdev = func_to_func->hwdev; struct completion *done = &send_mbox->send_done; u8 num_aeqs = hwdev->hwif->attr.num_aeqs; u16 dst_aeqn, wb_status = 0, errcode; if (num_aeqs >= 4) dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ? HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN; else dst_aeqn = 0; if (!poll) init_completion(done); clear_mbox_status(send_mbox); mbox_copy_header(hwdev, send_mbox, &header); mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll); wmb(); /* writing the mbox msg attributes */ if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status)) return -ETIMEDOUT; if (!MBOX_STATUS_SUCCESS(wb_status)) { dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n", dst_func, wb_status); errcode = MBOX_STATUS_ERRCODE(wb_status); return errcode ? errcode : -EFAULT; } return 0; } static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, enum hinic_mod_type mod, u16 cmd, void *msg, u16 msg_len, u16 dst_func, enum hinic_hwif_direction_type direction, enum hinic_mbox_ack_type ack_type, struct mbox_msg_info *msg_info) { struct hinic_hwdev *hwdev = func_to_func->hwdev; u16 seg_len = MBOX_SEG_LEN; u8 *msg_seg = (u8 *)msg; u16 left = msg_len; u32 seq_id = 0; u64 header = 0; int err = 0; down(&func_to_func->msg_send_sem); header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) | HINIC_MBOX_HEADER_SET(mod, MODULE) | HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) | HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) | HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) | HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) | HINIC_MBOX_HEADER_SET(direction, DIRECTION) | HINIC_MBOX_HEADER_SET(cmd, CMD) | /* The vf's offset to it's associated pf */ HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) | HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) | HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif), SRC_GLB_FUNC_IDX); while (!(HINIC_MBOX_HEADER_GET(header, LAST))) { if (left <= HINIC_MBOX_SEG_LEN) { header &= ~MBOX_SEGLEN_MASK; header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN); header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST); seg_len = left; } err = send_mbox_seg(func_to_func, header, dst_func, msg_seg, seg_len, MBOX_SEND_MSG_INT, msg_info); if (err) { dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n", HINIC_MBOX_HEADER_GET(header, SEQID)); goto err_send_mbox_seg; } left -= HINIC_MBOX_SEG_LEN; msg_seg += HINIC_MBOX_SEG_LEN; seq_id++; header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK, SEQID)); header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID); } err_send_mbox_seg: up(&func_to_func->msg_send_sem); return err; } static void response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func, struct hinic_recv_mbox *recv_mbox, int err, u16 out_size, u16 src_func_idx) { struct mbox_msg_info msg_info = {0}; if 
(recv_mbox->ack_type == MBOX_ACK) { msg_info.msg_id = recv_mbox->msg_info.msg_id; if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW) msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW; else if (err == HINIC_MBOX_VF_CMD_ERROR) msg_info.status = HINIC_MBOX_VF_CMD_ERROR; else if (err) msg_info.status = HINIC_MBOX_PF_SEND_ERR; /* if no data needs to response, set out_size to 1 */ if (!out_size || err) out_size = MBOX_MSG_NO_DATA_LEN; send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd, recv_mbox->buf_out, out_size, src_func_idx, HINIC_HWIF_RESPONSE, MBOX_ACK, &msg_info); } } static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, struct hinic_recv_mbox *recv_mbox, u16 src_func_idx) { void *buf_out = recv_mbox->buf_out; u16 out_size = MBOX_MAX_BUF_SZ; int err = 0; if (HINIC_IS_VF(func_to_func->hwif)) { err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, &out_size); } else { if (IS_PF_OR_PPF_SRC(src_func_idx)) dev_warn(&func_to_func->hwif->pdev->dev, "Unsupported pf2pf mbox msg\n"); else err = recv_pf_from_vf_mbox_handler(func_to_func, recv_mbox, src_func_idx, buf_out, &out_size); } response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size, src_func_idx); kfree(recv_mbox->buf_out); kfree(recv_mbox->mbox); kfree(recv_mbox); } static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func, enum mbox_event_state event_flag) { spin_lock(&func_to_func->mbox_lock); func_to_func->event_flag = event_flag; spin_unlock(&func_to_func->mbox_lock); } static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func, struct hinic_recv_mbox *mbox_for_resp, enum hinic_mod_type mod, u16 cmd, void *buf_out, u16 *out_size) { int err; if (mbox_for_resp->msg_info.status) { err = mbox_for_resp->msg_info.status; if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n", mbox_for_resp->msg_info.status); return err; } if (buf_out && out_size) { if (*out_size < mbox_for_resp->mbox_len) { dev_err(&func_to_func->hwif->pdev->dev, "Invalid response mbox message length: %d for mod %d cmd %d, should less than: %d\n", mbox_for_resp->mbox_len, mod, cmd, *out_size); return -EFAULT; } if (mbox_for_resp->mbox_len) memcpy(buf_out, mbox_for_resp->mbox, mbox_for_resp->mbox_len); *out_size = mbox_for_resp->mbox_len; } return 0; } int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, enum hinic_mod_type mod, u16 cmd, u16 dst_func, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, u32 timeout) { struct hinic_recv_mbox *mbox_for_resp; struct mbox_msg_info msg_info = {0}; unsigned long timeo; int err; mbox_for_resp = &func_to_func->mbox_resp[dst_func]; down(&func_to_func->mbox_send_sem); init_completion(&mbox_for_resp->recv_done); msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func); set_mbox_to_func_event(func_to_func, EVENT_START); err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size, dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK, &msg_info); if (err) { dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n", msg_info.msg_id); set_mbox_to_func_event(func_to_func, EVENT_FAIL); goto err_send_mbox; } timeo = msecs_to_jiffies(timeout ? 
timeout : HINIC_MBOX_COMP_TIME); if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) { set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); dev_err(&func_to_func->hwif->pdev->dev, "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id); hinic_dump_aeq_info(func_to_func->hwdev); err = -ETIMEDOUT; goto err_send_mbox; } set_mbox_to_func_event(func_to_func, EVENT_END); err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd, buf_out, out_size); err_send_mbox: up(&func_to_func->mbox_send_sem); return err; } static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func, void *buf_in, u16 in_size) { if (in_size > HINIC_MBOX_DATA_SIZE) { dev_err(&func_to_func->hwif->pdev->dev, "Mbox msg len(%d) exceed limit(%d)\n", in_size, HINIC_MBOX_DATA_SIZE); return -EINVAL; } return 0; } int hinic_mbox_to_pf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, u32 timeout) { struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; int err = mbox_func_params_valid(func_to_func, buf_in, in_size); if (err) return err; if (!HINIC_IS_VF(hwdev->hwif)) { dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n", HINIC_FUNC_TYPE(hwdev->hwif)); return -EINVAL; } return hinic_mbox_to_func(func_to_func, mod, cmd, hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in, in_size, buf_out, out_size, timeout); } int hinic_mbox_to_vf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, u32 timeout) { struct hinic_mbox_func_to_func *func_to_func; u16 dst_func_idx; int err; if (!hwdev) return -EINVAL; func_to_func = hwdev->func_to_func; err = mbox_func_params_valid(func_to_func, buf_in, in_size); if (err) return err; if (HINIC_IS_VF(hwdev->hwif)) { dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n", HINIC_FUNC_TYPE(hwdev->hwif)); return -EINVAL; } if (!vf_id) { dev_err(&hwdev->hwif->pdev->dev, "VF id(%d) error!\n", vf_id); return -EINVAL; } /* vf_offset_to_pf + vf_id is the vf's global function id of vf in * this pf */ dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id; return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in, in_size, buf_out, out_size, timeout); } static int init_mbox_info(struct hinic_recv_mbox *mbox_info) { int err; mbox_info->seq_id = SEQ_ID_MAX_VAL; mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); if (!mbox_info->mbox) return -ENOMEM; mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); if (!mbox_info->buf_out) { err = -ENOMEM; goto err_alloc_buf_out; } atomic_set(&mbox_info->msg_cnt, 0); return 0; err_alloc_buf_out: kfree(mbox_info->mbox); return err; } static void clean_mbox_info(struct hinic_recv_mbox *mbox_info) { kfree(mbox_info->buf_out); kfree(mbox_info->mbox); } static int alloc_mbox_info(struct hinic_hwdev *hwdev, struct hinic_recv_mbox *mbox_info) { u16 func_idx, i; int err; for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) { err = init_mbox_info(&mbox_info[func_idx]); if (err) { dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n", func_idx); goto err_init_mbox_info; } } return 0; err_init_mbox_info: for (i = 0; i < func_idx; i++) clean_mbox_info(&mbox_info[i]); return err; } static void free_mbox_info(struct hinic_recv_mbox *mbox_info) { u16 func_idx; for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) clean_mbox_info(&mbox_info[func_idx]); } static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func) 
{ struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; send_mbox->data = MBOX_AREA(func_to_func->hwif); } static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func) { struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; struct hinic_hwdev *hwdev = func_to_func->hwdev; u32 addr_h, addr_l; send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN, &send_mbox->wb_paddr, GFP_KERNEL); if (!send_mbox->wb_vaddr) return -ENOMEM; send_mbox->wb_status = send_mbox->wb_vaddr; addr_h = upper_32_bits(send_mbox->wb_paddr); addr_l = lower_32_bits(send_mbox->wb_paddr); hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, addr_h); hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, addr_l); return 0; } static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func) { struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox; struct hinic_hwdev *hwdev = func_to_func->hwdev; hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF, 0); hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF, 0); dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN, send_mbox->wb_vaddr, send_mbox->wb_paddr); } bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev, struct vf_cmd_check_handle *cmd_handle, u16 vf_id, u8 cmd, void *buf_in, u16 in_size, u8 size) { u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev->hwif); int i; for (i = 0; i < size; i++) { if (cmd == cmd_handle[i].cmd) { if (cmd_handle[i].check_cmd) return cmd_handle[i].check_cmd(hwdev, src_idx, buf_in, in_size); else return true; } } dev_err(&hwdev->hwif->pdev->dev, "PF Receive VF(%d) unsupported cmd(0x%x)\n", vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd); return false; } static bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev, struct hinic_cmdq_ctxt *cmdq_ctxt) { struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; u64 curr_pg_pfn, wq_block_pfn; if (cmdq_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif) || cmdq_ctxt->cmdq_type > HINIC_MAX_CMDQ_TYPES) return false; curr_pg_pfn = HINIC_CMDQ_CTXT_PAGE_INFO_GET (ctxt_info->curr_wqe_page_pfn, CURR_WQE_PAGE_PFN); wq_block_pfn = HINIC_CMDQ_CTXT_BLOCK_INFO_GET (ctxt_info->wq_block_pfn, WQ_BLOCK_PFN); /* VF must use 0-level CLA */ if (curr_pg_pfn != wq_block_pfn) return false; return true; } static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx, void *buf_in, u16 in_size) { if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) return false; return hinic_cmdq_check_vf_ctxt(hwdev, buf_in); } #define HW_CTX_QPS_VALID(hw_ctxt) \ ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \ (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \ (hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH && \ (hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH && \ (hw_ctxt)->rx_buf_sz_idx <= HINIC_MAX_RX_BUFFER_SIZE) static bool hw_ctxt_qps_param_valid(struct hinic_cmd_hw_ioctxt *hw_ctxt) { if (HW_CTX_QPS_VALID(hw_ctxt)) return true; if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth && !hw_ctxt->rx_buf_sz_idx) return true; return false; } static bool check_hwctxt(struct hinic_hwdev *hwdev, u16 func_idx, void *buf_in, u16 in_size) { struct hinic_cmd_hw_ioctxt *hw_ctxt = buf_in; if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) return false; if (hw_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif)) return false; if (hw_ctxt->set_cmdq_depth) { if (hw_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH && hw_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH) return 
true; return false; } return hw_ctxt_qps_param_valid(hw_ctxt); } static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx, void *buf_in, u16 in_size) { struct hinic_wq_page_size *page_size_info = buf_in; if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) return false; if (page_size_info->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif)) return false; if (((1U << page_size_info->page_size) * SZ_4K) != HINIC_DEFAULT_WQ_PAGE_SIZE) return false; return true; } static struct vf_cmd_check_handle hw_cmd_support_vf[] = { {HINIC_COMM_CMD_START_FLR, hinic_mbox_check_func_id_8B}, {HINIC_COMM_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B}, {HINIC_COMM_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt}, {HINIC_COMM_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt}, {HINIC_COMM_CMD_HWCTXT_SET, check_hwctxt}, {HINIC_COMM_CMD_HWCTXT_GET, check_hwctxt}, {HINIC_COMM_CMD_SQ_HI_CI_SET, hinic_mbox_check_func_id_8B}, {HINIC_COMM_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B}, {HINIC_COMM_CMD_IO_RES_CLEAR, hinic_mbox_check_func_id_8B}, {HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, {HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, {HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B}, {HINIC_COMM_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B}, {HINIC_COMM_CMD_PAGESIZE_SET, check_set_wq_page_size}, }; static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { u8 size = ARRAY_SIZE(hw_cmd_support_vf); struct hinic_hwdev *hwdev = handle; struct hinic_pfhwdev *pfhwdev; int err = 0; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd, buf_in, in_size, size)) { dev_err(&hwdev->hwif->pdev->dev, "PF Receive VF: %d common cmd: 0x%x or mbox len: 0x%x is invalid\n", vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd, in_size); return HINIC_MBOX_VF_CMD_ERROR; } if (cmd == HINIC_COMM_CMD_START_FLR) { *out_size = 0; } else { err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, cmd, buf_in, in_size, buf_out, out_size, HINIC_MGMT_MSG_SYNC); if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) dev_err(&hwdev->hwif->pdev->dev, "PF mbox common callback handler err: %d\n", err); } return err; } int hinic_func_to_func_init(struct hinic_hwdev *hwdev) { struct hinic_mbox_func_to_func *func_to_func; struct hinic_pfhwdev *pfhwdev; int err; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL); if (!func_to_func) return -ENOMEM; hwdev->func_to_func = func_to_func; func_to_func->hwdev = hwdev; func_to_func->hwif = hwdev->hwif; sema_init(&func_to_func->mbox_send_sem, 1); sema_init(&func_to_func->msg_send_sem, 1); spin_lock_init(&func_to_func->mbox_lock); func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME); if (!func_to_func->workq) { dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n"); err = -ENOMEM; goto err_create_mbox_workq; } err = alloc_mbox_info(hwdev, func_to_func->mbox_send); if (err) { dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n"); goto err_alloc_mbox_for_send; } err = alloc_mbox_info(hwdev, func_to_func->mbox_resp); if (err) { dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n"); goto err_alloc_mbox_for_resp; } err = alloc_mbox_wb_status(func_to_func); if (err) { dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n"); goto err_alloc_wb_status; } 
prepare_send_mbox(func_to_func); hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC, &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler); hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT, &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler); if (!HINIC_IS_VF(hwdev->hwif)) hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM, comm_pf_mbox_handler); return 0; err_alloc_wb_status: free_mbox_info(func_to_func->mbox_resp); err_alloc_mbox_for_resp: free_mbox_info(func_to_func->mbox_send); err_alloc_mbox_for_send: destroy_workqueue(func_to_func->workq); err_create_mbox_workq: kfree(func_to_func); return err; } void hinic_func_to_func_free(struct hinic_hwdev *hwdev) { struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC); hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT); hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM); /* destroy workqueue before free related mbox resources in case of * illegal resource access */ destroy_workqueue(func_to_func->workq); free_mbox_wb_status(func_to_func); free_mbox_info(func_to_func->mbox_resp); free_mbox_info(func_to_func->mbox_send); kfree(func_to_func); } int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev) { u16 vf_offset; u8 vf_in_pf; int err = 0; if (HINIC_IS_VF(hwdev->hwif)) return 0; vf_offset = hinic_glb_pf_vf_offset(hwdev->hwif); for (vf_in_pf = 1; vf_in_pf <= hwdev->nic_cap.max_vf; vf_in_pf++) { err = set_vf_mbox_random_id(hwdev, vf_offset + vf_in_pf); if (err) break; } if (err == HINIC_MGMT_CMD_UNSUPPORTED) { hwdev->func_to_func->support_vf_random = false; err = 0; dev_warn(&hwdev->hwif->pdev->dev, "Mgmt is unsupported to set VF%d random id\n", vf_in_pf - 1); } else if (!err) { hwdev->func_to_func->support_vf_random = true; } return err; }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
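A caller-side illustration of the PF mailbox callback API defined in hinic_hw_mbox.c above: the sketch assumes the hinic_pf_mbox_cb prototype seen in comm_pf_mbox_handler(), and the HINIC_MOD_L2NIC module id plus the demo_* names are illustrative placeholders rather than part of the driver.

/*
 * Sketch only: a PF-side handler for VF mailbox requests and its
 * registration.  The demo_* names and the module id are illustrative.
 */
static int demo_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd,
				void *buf_in, u16 in_size,
				void *buf_out, u16 *out_size)
{
	struct hinic_hwdev *hwdev = handle;

	dev_info(&hwdev->hwif->pdev->dev,
		 "VF %u sent cmd 0x%x, %u bytes\n", vf_id, cmd, in_size);

	/* No payload to return: response_for_recv_func_mbox() sends a 1-byte ack */
	*out_size = 0;
	return 0;
}

static int demo_install_pf_handler(struct hinic_hwdev *hwdev)
{
	/* Handler is invoked only once both the pointer and the CB_REG bit are set */
	return hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC,
					 demo_pf_mbox_handler);
}

static void demo_remove_pf_handler(struct hinic_hwdev *hwdev)
{
	/* Clears CB_REG, then waits until any in-flight callback has drained */
	hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
}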
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/spinlock.h> #include <linux/sizes.h> #include <linux/atomic.h> #include <linux/log2.h> #include <linux/io.h> #include <linux/completion.h> #include <linux/err.h> #include <asm/byteorder.h> #include <asm/barrier.h> #include "hinic_common.h" #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_mgmt.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_cmdq.h" #include "hinic_hw_io.h" #include "hinic_hw_dev.h" #define CMDQ_CEQE_TYPE_SHIFT 0 #define CMDQ_CEQE_TYPE_MASK 0x7 #define CMDQ_CEQE_GET(val, member) \ (((val) >> CMDQ_CEQE_##member##_SHIFT) \ & CMDQ_CEQE_##member##_MASK) #define CMDQ_WQE_ERRCODE_VAL_SHIFT 20 #define CMDQ_WQE_ERRCODE_VAL_MASK 0xF #define CMDQ_WQE_ERRCODE_GET(val, member) \ (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \ & CMDQ_WQE_ERRCODE_##member##_MASK) #define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) #define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi)) #define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) #define CMDQ_WQE_COMPLETED(ctrl_info) \ HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) #define FIRST_DATA_TO_WRITE_LAST sizeof(u64) #define CMDQ_DB_OFF SZ_2K #define CMDQ_WQEBB_SIZE 64 #define CMDQ_WQE_SIZE 64 #define CMDQ_DEPTH SZ_4K #define CMDQ_WQ_PAGE_SIZE SZ_256K #define WQE_LCMD_SIZE 64 #define WQE_SCMD_SIZE 64 #define COMPLETE_LEN 3 #define CMDQ_TIMEOUT 1000 #define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size))) #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ struct hinic_cmdqs, cmdq[0]) #define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \ struct hinic_func_to_io, \ cmdqs) enum completion_format { COMPLETE_DIRECT = 0, COMPLETE_SGE = 1, }; enum data_format { DATA_SGE = 0, DATA_DIRECT = 1, }; enum bufdesc_len { BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */ BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */ }; enum ctrl_sect_len { CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */ CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */ }; enum cmdq_scmd_type { CMDQ_SET_ARM_CMD = 2, }; enum cmdq_cmd_type { CMDQ_CMD_SYNC_DIRECT_RESP = 0, CMDQ_CMD_SYNC_SGE_RESP = 1, }; enum completion_request { NO_CEQ = 0, CEQ_SET = 1, }; /** * hinic_alloc_cmdq_buf - alloc buffer for sending command * @cmdqs: the cmdqs * @cmdq_buf: the buffer returned in this struct * * Return 0 - Success, negative - Failure **/ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, struct hinic_cmdq_buf *cmdq_buf) { struct hinic_hwif *hwif = cmdqs->hwif; struct pci_dev *pdev = hwif->pdev; cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL, &cmdq_buf->dma_addr); if (!cmdq_buf->buf) { dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n"); return -ENOMEM; } return 0; } /** * hinic_free_cmdq_buf - free buffer * @cmdqs: the cmdqs * @cmdq_buf: the buffer to free that is in this struct **/ void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, struct hinic_cmdq_buf *cmdq_buf) { dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr); } static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len) { unsigned int wqe_size = 0; switch (len) { case BUFDESC_LCMD_LEN: wqe_size = WQE_LCMD_SIZE; break; case 
BUFDESC_SCMD_LEN: wqe_size = WQE_SCMD_SIZE; break; } return wqe_size; } static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion, struct hinic_cmdq_buf *buf_out) { struct hinic_sge_resp *sge_resp = &completion->sge_resp; hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size); } static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped, enum hinic_cmd_ack_type ack_type, enum hinic_mod_type mod, u8 cmd, u16 prod_idx, enum completion_format complete_format, enum data_format data_format, enum bufdesc_len buf_len) { struct hinic_cmdq_wqe_lcmd *wqe_lcmd; struct hinic_cmdq_wqe_scmd *wqe_scmd; enum ctrl_sect_len ctrl_len; struct hinic_ctrl *ctrl; u32 saved_data; if (data_format == DATA_SGE) { wqe_lcmd = &wqe->wqe_lcmd; wqe_lcmd->status.status_info = 0; ctrl = &wqe_lcmd->ctrl; ctrl_len = CTRL_SECT_LEN; } else { wqe_scmd = &wqe->direct_wqe.wqe_scmd; wqe_scmd->status.status_info = 0; ctrl = &wqe_scmd->ctrl; ctrl_len = CTRL_DIRECT_SECT_LEN; } ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) | HINIC_CMDQ_CTRL_SET(cmd, CMD) | HINIC_CMDQ_CTRL_SET(mod, MOD) | HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE); CMDQ_WQE_HEADER(wqe)->header_info = HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED); saved_data = CMDQ_WQE_HEADER(wqe)->saved_data; saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM); if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) CMDQ_WQE_HEADER(wqe)->saved_data |= HINIC_SAVED_DATA_SET(1, ARM); else CMDQ_WQE_HEADER(wqe)->saved_data = saved_data; } static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd, struct hinic_cmdq_buf *buf_in) { hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size); } static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe, void *buf_in, u32 in_size) { struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; wqe_scmd->buf_desc.buf_len = in_size; memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); } static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, enum cmdq_cmd_type cmd_type, struct hinic_cmdq_buf *buf_in, struct hinic_cmdq_buf *buf_out, int wrapped, enum hinic_cmd_ack_type ack_type, enum hinic_mod_type mod, u8 cmd, u16 prod_idx) { struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; enum completion_format complete_format; switch (cmd_type) { case CMDQ_CMD_SYNC_SGE_RESP: complete_format = COMPLETE_SGE; cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out); break; case CMDQ_CMD_SYNC_DIRECT_RESP: complete_format = COMPLETE_DIRECT; wqe_lcmd->completion.direct_resp = 0; break; } cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, complete_format, DATA_SGE, BUFDESC_LCMD_LEN); cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); } static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe, enum cmdq_cmd_type cmd_type, void *buf_in, u16 in_size, struct hinic_cmdq_buf *buf_out, int wrapped, enum hinic_cmd_ack_type ack_type, enum hinic_mod_type mod, u8 cmd, u16 prod_idx) { struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; enum completion_format complete_format; struct hinic_cmdq_wqe_scmd *wqe_scmd; wqe_scmd = &direct_wqe->wqe_scmd; switch (cmd_type) { case CMDQ_CMD_SYNC_SGE_RESP: complete_format = COMPLETE_SGE; 
cmdq_set_sge_completion(&wqe_scmd->completion, buf_out); break; case CMDQ_CMD_SYNC_DIRECT_RESP: complete_format = COMPLETE_DIRECT; wqe_scmd->completion.direct_resp = 0; break; } cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size); } static void cmdq_wqe_fill(void *dst, void *src) { memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST, CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); wmb(); /* The first 8 bytes should be written last */ *(u64 *)dst = *(u64 *)src; } static void cmdq_fill_db(u32 *db_info, enum hinic_cmdq_type cmdq_type, u16 prod_idx) { *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) | HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE); } static void cmdq_set_db(struct hinic_cmdq *cmdq, enum hinic_cmdq_type cmdq_type, u16 prod_idx) { u32 db_info; cmdq_fill_db(&db_info, cmdq_type, prod_idx); /* The data that is written to HW should be in Big Endian Format */ db_info = cpu_to_be32(db_info); wmb(); /* write all before the doorbell */ writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); } static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, enum hinic_mod_type mod, u8 cmd, struct hinic_cmdq_buf *buf_in, u64 *resp) { struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; u16 curr_prod_idx, next_prod_idx; int errcode, wrapped, num_wqebbs; struct hinic_wq *wq = cmdq->wq; struct hinic_hw_wqe *hw_wqe; struct completion done; /* Keep doorbell index correct. bh - for tasklet(ceq). */ spin_lock_bh(&cmdq->cmdq_lock); /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx); if (IS_ERR(hw_wqe)) { spin_unlock_bh(&cmdq->cmdq_lock); return -EBUSY; } curr_cmdq_wqe = &hw_wqe->cmdq_wqe; wrapped = cmdq->wrapped; num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; next_prod_idx = curr_prod_idx + num_wqebbs; if (next_prod_idx >= wq->q_depth) { cmdq->wrapped = !cmdq->wrapped; next_prod_idx -= wq->q_depth; } cmdq->errcode[curr_prod_idx] = &errcode; init_completion(&done); cmdq->done[curr_prod_idx] = &done; cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd, curr_prod_idx); /* The data that is written to HW should be in Big Endian Format */ hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE); /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); spin_unlock_bh(&cmdq->cmdq_lock); if (!wait_for_completion_timeout(&done, msecs_to_jiffies(CMDQ_TIMEOUT))) { spin_lock_bh(&cmdq->cmdq_lock); if (cmdq->errcode[curr_prod_idx] == &errcode) cmdq->errcode[curr_prod_idx] = NULL; if (cmdq->done[curr_prod_idx] == &done) cmdq->done[curr_prod_idx] = NULL; spin_unlock_bh(&cmdq->cmdq_lock); hinic_dump_ceq_info(cmdq->hwdev); return -ETIMEDOUT; } smp_rmb(); /* read error code after completion */ if (resp) { struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd; *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp); } if (errcode != 0) return -EFAULT; return 0; } static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in, u16 in_size) { struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; u16 curr_prod_idx, next_prod_idx; struct hinic_wq *wq = cmdq->wq; struct hinic_hw_wqe *hw_wqe; int wrapped, num_wqebbs; /* Keep doorbell 
index correct */ spin_lock(&cmdq->cmdq_lock); /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx); if (IS_ERR(hw_wqe)) { spin_unlock(&cmdq->cmdq_lock); return -EBUSY; } curr_cmdq_wqe = &hw_wqe->cmdq_wqe; wrapped = cmdq->wrapped; num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; next_prod_idx = curr_prod_idx + num_wqebbs; if (next_prod_idx >= wq->q_depth) { cmdq->wrapped = !cmdq->wrapped; next_prod_idx -= wq->q_depth; } cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ, HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx); /* The data that is written to HW should be in Big Endian Format */ hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE); /* cmdq wqe is not shadow, therefore wqe will be written to wq */ cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); spin_unlock(&cmdq->cmdq_lock); return 0; } static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in) { if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) return -EINVAL; return 0; } /** * hinic_cmdq_direct_resp - send command with direct data as resp * @cmdqs: the cmdqs * @mod: module on the card that will handle the command * @cmd: the command * @buf_in: the buffer for the command * @resp: the response to return * * Return 0 - Success, negative - Failure **/ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, enum hinic_mod_type mod, u8 cmd, struct hinic_cmdq_buf *buf_in, u64 *resp) { struct hinic_hwif *hwif = cmdqs->hwif; struct pci_dev *pdev = hwif->pdev; int err; err = cmdq_params_valid(buf_in); if (err) { dev_err(&pdev->dev, "Invalid CMDQ parameters\n"); return err; } return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], mod, cmd, buf_in, resp); } /** * hinic_set_arm_bit - set arm bit for enable interrupt again * @cmdqs: the cmdqs * @q_type: type of queue to set the arm bit for * @q_id: the queue number * * Return 0 - Success, negative - Failure **/ static int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, enum hinic_set_arm_qtype q_type, u32 q_id) { struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC]; struct hinic_hwif *hwif = cmdqs->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_cmdq_arm_bit arm_bit; int err; arm_bit.q_type = q_type; arm_bit.q_id = q_id; err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit)); if (err) { dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id); return err; } return 0; } static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq, struct hinic_cmdq_wqe *wqe) { u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info); unsigned int bufdesc_len, wqe_size; struct hinic_ctrl *ctrl; bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len); if (wqe_size == WQE_LCMD_SIZE) { struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; ctrl = &wqe_lcmd->ctrl; } else { struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; struct hinic_cmdq_wqe_scmd *wqe_scmd; wqe_scmd = &direct_wqe->wqe_scmd; ctrl = &wqe_scmd->ctrl; } /* clear HW busy bit */ ctrl->ctrl_info = 0; wmb(); /* verify wqe is clear */ } /** * cmdq_arm_ceq_handler - cmdq completion event handler for arm command * @cmdq: the cmdq of the arm command * @wqe: the wqe of the arm command * * Return 0 - Success, negative - Failure **/ static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq, struct hinic_cmdq_wqe *wqe) { struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; struct 
hinic_cmdq_wqe_scmd *wqe_scmd; struct hinic_ctrl *ctrl; u32 ctrl_info; wqe_scmd = &direct_wqe->wqe_scmd; ctrl = &wqe_scmd->ctrl; ctrl_info = be32_to_cpu(ctrl->ctrl_info); /* HW should toggle the HW BUSY BIT */ if (!CMDQ_WQE_COMPLETED(ctrl_info)) return -EBUSY; clear_wqe_complete_bit(cmdq, wqe); hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE); return 0; } static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx, int errcode) { if (cmdq->errcode[prod_idx]) *cmdq->errcode[prod_idx] = errcode; } /** * cmdq_sync_cmd_handler - cmdq completion event handler for sync command * @cmdq: the cmdq of the command * @cons_idx: the consumer index to update the error code for * @errcode: the error code **/ static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx, int errcode) { u16 prod_idx = cons_idx; spin_lock(&cmdq->cmdq_lock); cmdq_update_errcode(cmdq, prod_idx, errcode); wmb(); /* write all before update for the command request */ if (cmdq->done[prod_idx]) complete(cmdq->done[prod_idx]); spin_unlock(&cmdq->cmdq_lock); } static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci, struct hinic_cmdq_wqe *cmdq_wqe) { struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd; struct hinic_status *status = &wqe_lcmd->status; struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl; int errcode; if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info))) return -EBUSY; dma_rmb(); errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL); cmdq_sync_cmd_handler(cmdq, ci, errcode); clear_wqe_complete_bit(cmdq, cmdq_wqe); hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE); return 0; } /** * cmdq_ceq_handler - cmdq completion event handler * @handle: private data for the handler(cmdqs) * @ceqe_data: ceq element data **/ static void cmdq_ceq_handler(void *handle, u32 ceqe_data) { enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE); struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle; struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; struct hinic_cmdq_header *header; struct hinic_hw_wqe *hw_wqe; int err, set_arm = 0; u32 saved_data; u16 ci; /* Read the smallest wqe size for getting wqe size */ while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) { if (IS_ERR(hw_wqe)) break; header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe); saved_data = be32_to_cpu(header->saved_data); if (HINIC_SAVED_DATA_GET(saved_data, ARM)) { /* arm_bit was set until here */ set_arm = 0; if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe)) break; } else { set_arm = 1; hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci); if (IS_ERR(hw_wqe)) break; if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe)) break; } } if (set_arm) { struct hinic_hwif *hwif = cmdqs->hwif; struct pci_dev *pdev = hwif->pdev; err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type); if (err) dev_err(&pdev->dev, "Failed to set arm for CMDQ\n"); } } /** * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq * @cmdq_ctxt: cmdq ctxt to initialize * @cmdq: the cmdq * @cmdq_pages: the memory of the queue **/ static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt, struct hinic_cmdq *cmdq, struct hinic_cmdq_pages *cmdq_pages) { struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn; struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); struct hinic_wq *wq = cmdq->wq; /* The data in the HW is in Big Endian Format */ wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr); pfn = CMDQ_PFN(wq_first_page_paddr, SZ_4K); ctxt_info->curr_wqe_page_pfn = HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, 
CURR_WQE_PAGE_PFN) | HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) | HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED); if (wq->num_q_pages != 1) { /* block PFN - Read Modify Write */ cmdq_first_block_paddr = cmdq_pages->page_paddr; pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size); } ctxt_info->wq_block_pfn = HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) | HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI); cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif); cmdq_ctxt->ppf_idx = HINIC_HWIF_PPF_IDX(cmdqs->hwif); cmdq_ctxt->cmdq_type = cmdq->cmdq_type; } /** * init_cmdq - initialize cmdq * @cmdq: the cmdq * @wq: the wq attaced to the cmdq * @q_type: the cmdq type of the cmdq * @db_area: doorbell area for the cmdq * * Return 0 - Success, negative - Failure **/ static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq, enum hinic_cmdq_type q_type, void __iomem *db_area) { int err; cmdq->wq = wq; cmdq->cmdq_type = q_type; cmdq->wrapped = 1; spin_lock_init(&cmdq->cmdq_lock); cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth)); if (!cmdq->done) return -ENOMEM; cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode), wq->q_depth)); if (!cmdq->errcode) { err = -ENOMEM; goto err_errcode; } cmdq->db_base = db_area + CMDQ_DB_OFF; return 0; err_errcode: vfree(cmdq->done); return err; } /** * free_cmdq - Free cmdq * @cmdq: the cmdq to free **/ static void free_cmdq(struct hinic_cmdq *cmdq) { vfree(cmdq->errcode); vfree(cmdq->done); } /** * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq * @hwdev: the NIC HW device * @cmdqs: cmdqs to write the ctxts for * @db_area: db_area for all the cmdqs * * Return 0 - Success, negative - Failure **/ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev, struct hinic_cmdqs *cmdqs, void __iomem **db_area) { struct hinic_hwif *hwif = hwdev->hwif; enum hinic_cmdq_type type, cmdq_type; struct hinic_cmdq_ctxt *cmdq_ctxts; struct pci_dev *pdev = hwif->pdev; struct hinic_pfhwdev *pfhwdev; int err; cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES, sizeof(*cmdq_ctxts), GFP_KERNEL); if (!cmdq_ctxts) return -ENOMEM; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); cmdq_type = HINIC_CMDQ_SYNC; for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { cmdqs->cmdq[cmdq_type].hwdev = hwdev; err = init_cmdq(&cmdqs->cmdq[cmdq_type], &cmdqs->saved_wqs[cmdq_type], cmdq_type, db_area[cmdq_type]); if (err) { dev_err(&pdev->dev, "Failed to initialize cmdq\n"); goto err_init_cmdq; } cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type], &cmdqs->cmdq[cmdq_type], &cmdqs->cmdq_pages); } /* Write the CMDQ ctxts */ cmdq_type = HINIC_CMDQ_SYNC; for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_CMDQ_CTXT_SET, &cmdq_ctxts[cmdq_type], sizeof(cmdq_ctxts[cmdq_type]), NULL, NULL, HINIC_MGMT_MSG_SYNC); if (err) { dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n", cmdq_type); goto err_write_cmdq_ctxt; } } devm_kfree(&pdev->dev, cmdq_ctxts); return 0; err_write_cmdq_ctxt: cmdq_type = HINIC_MAX_CMDQ_TYPES; err_init_cmdq: for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++) free_cmdq(&cmdqs->cmdq[type]); devm_kfree(&pdev->dev, cmdq_ctxts); return err; } static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth) { struct hinic_cmd_hw_ioctxt hw_ioctxt = { 0 }; struct hinic_pfhwdev *pfhwdev; pfhwdev = container_of(hwdev, struct 
hinic_pfhwdev, hwdev); hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif); hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_ENABLE; hw_ioctxt.cmdq_depth = (u8)ilog2(cmdq_depth); return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_HWCTXT_SET, &hw_ioctxt, sizeof(hw_ioctxt), NULL, NULL, HINIC_MGMT_MSG_SYNC); } /** * hinic_init_cmdqs - init all cmdqs * @cmdqs: cmdqs to init * @hwif: HW interface for accessing cmdqs * @db_area: doorbell areas for all the cmdqs * * Return 0 - Success, negative - Failure **/ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, void __iomem **db_area) { struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); struct pci_dev *pdev = hwif->pdev; struct hinic_hwdev *hwdev; u16 max_wqe_size; int err; cmdqs->hwif = hwif; cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev, HINIC_CMDQ_BUF_SIZE, HINIC_CMDQ_BUF_SIZE, 0); if (!cmdqs->cmdq_buf_pool) return -ENOMEM; cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES, sizeof(*cmdqs->saved_wqs), GFP_KERNEL); if (!cmdqs->saved_wqs) { err = -ENOMEM; goto err_saved_wqs; } max_wqe_size = WQE_LCMD_SIZE; err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif, HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE, CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size); if (err) { dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n"); goto err_cmdq_wqs; } hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io); err = init_cmdqs_ctxt(hwdev, cmdqs, db_area); if (err) { dev_err(&pdev->dev, "Failed to write cmdq ctxt\n"); goto err_cmdq_ctxt; } hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs, cmdq_ceq_handler); err = hinic_set_cmdq_depth(hwdev, CMDQ_DEPTH); if (err) { dev_err(&hwif->pdev->dev, "Failed to set cmdq depth\n"); goto err_set_cmdq_depth; } return 0; err_set_cmdq_depth: hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]); err_cmdq_ctxt: hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES); err_cmdq_wqs: devm_kfree(&pdev->dev, cmdqs->saved_wqs); err_saved_wqs: dma_pool_destroy(cmdqs->cmdq_buf_pool); return err; } /** * hinic_free_cmdqs - free all cmdqs * @cmdqs: cmdqs to free **/ void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs) { struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); struct hinic_hwif *hwif = cmdqs->hwif; struct pci_dev *pdev = hwif->pdev; enum hinic_cmdq_type cmdq_type; hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); cmdq_type = HINIC_CMDQ_SYNC; for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) free_cmdq(&cmdqs->cmdq[cmdq_type]); hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES); devm_kfree(&pdev->dev, cmdqs->saved_wqs); dma_pool_destroy(cmdqs->cmdq_buf_pool); }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
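A caller-side sketch of the synchronous cmdq path implemented in hinic_hw_cmdq.c above: allocate a buffer from the cmdq DMA pool, copy the command payload in, and wait for the direct 64-bit response. The demo_* wrapper and its parameters are illustrative assumptions; only the hinic_* calls, the hinic_cmdq_buf fields and the size limit come from the file.

/*
 * Sketch only: issuing a synchronous cmdq command with a direct
 * 64-bit response.  The demo_* wrapper is not part of the driver.
 */
static int demo_send_cmdq_cmd(struct hinic_cmdqs *cmdqs, enum hinic_mod_type mod,
			      u8 cmd, const void *cmd_data, u16 cmd_len, u64 *resp)
{
	struct hinic_cmdq_buf buf;
	int err;

	if (cmd_len > HINIC_CMDQ_MAX_DATA_SIZE)
		return -EINVAL;

	/* DMA-able buffer from the pool created in hinic_init_cmdqs() */
	err = hinic_alloc_cmdq_buf(cmdqs, &buf);
	if (err)
		return err;

	memcpy(buf.buf, cmd_data, cmd_len);
	buf.size = cmd_len;	/* cmdq_params_valid() checks this against the limit */

	/* Sleeps up to CMDQ_TIMEOUT ms waiting for the completion event */
	err = hinic_cmdq_direct_resp(cmdqs, mod, cmd, &buf, resp);

	hinic_free_cmdq_buf(cmdqs, &buf);
	return err;
}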
// SPDX-License-Identifier: GPL-2.0 /* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * */ #include <linux/netlink.h> #include <net/devlink.h> #include <linux/firmware.h> #include "hinic_port.h" #include "hinic_devlink.h" #include "hinic_hw_dev.h" static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf, u32 image_size, struct host_image_st *host_image) { struct fw_image_st *fw_image = NULL; u32 len = 0; u32 i; fw_image = (struct fw_image_st *)buf; if (fw_image->fw_magic != HINIC_MAGIC_NUM) { dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_magic read from file, fw_magic: 0x%x\n", fw_image->fw_magic); return false; } if (fw_image->fw_info.fw_section_cnt > MAX_FW_TYPE_NUM) { dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_type_num read from file, fw_type_num: 0x%x\n", fw_image->fw_info.fw_section_cnt); return false; } for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) { len += fw_image->fw_section_info[i].fw_section_len; host_image->image_section_info[i] = fw_image->fw_section_info[i]; } if (len != fw_image->fw_len || (fw_image->fw_len + UPDATEFW_IMAGE_HEAD_SIZE) != image_size) { dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong data size read from file\n"); return false; } host_image->image_info.up_total_len = fw_image->fw_len; host_image->image_info.fw_version = fw_image->fw_version; host_image->section_type_num = fw_image->fw_info.fw_section_cnt; host_image->device_id = fw_image->device_id; return true; } static bool check_image_integrity(struct hinic_devlink_priv *priv, struct host_image_st *host_image, u32 update_type) { u32 collect_section_type = 0; u32 i, type; for (i = 0; i < host_image->section_type_num; i++) { type = host_image->image_section_info[i].fw_section_type; if (collect_section_type & (1U << type)) { dev_err(&priv->hwdev->hwif->pdev->dev, "Duplicate section type: %u\n", type); return false; } collect_section_type |= (1U << type); } if (update_type == FW_UPDATE_COLD && (((collect_section_type & _IMAGE_COLD_SUB_MODULES_MUST_IN) == _IMAGE_COLD_SUB_MODULES_MUST_IN) || collect_section_type == _IMAGE_CFG_SUB_MODULES_MUST_IN)) return true; if (update_type == FW_UPDATE_HOT && (collect_section_type & _IMAGE_HOT_SUB_MODULES_MUST_IN) == _IMAGE_HOT_SUB_MODULES_MUST_IN) return true; if (update_type == FW_UPDATE_COLD) dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid: 0x%x or 0x%lx, current: 0x%x\n", _IMAGE_COLD_SUB_MODULES_MUST_IN, _IMAGE_CFG_SUB_MODULES_MUST_IN, collect_section_type); else dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid:0x%x, current: 0x%x\n", _IMAGE_HOT_SUB_MODULES_MUST_IN, collect_section_type); return false; } static int check_image_device_type(struct hinic_devlink_priv *priv, u32 image_device_type) { struct hinic_comm_board_info board_info = {0}; if (hinic_get_board_info(priv->hwdev, &board_info)) { dev_err(&priv->hwdev->hwif->pdev->dev, "Get board info failed\n"); return false; } if (image_device_type == board_info.info.board_type) return true; dev_err(&priv->hwdev->hwif->pdev->dev, "The device type of upgrade file 
doesn't match the device type of current firmware, please check the upgrade file\n"); dev_err(&priv->hwdev->hwif->pdev->dev, "The image device type: 0x%x, firmware device type: 0x%x\n", image_device_type, board_info.info.board_type); return false; } static int hinic_flash_fw(struct hinic_devlink_priv *priv, const u8 *data, struct host_image_st *host_image) { u32 section_remain_send_len, send_fragment_len, send_pos, up_total_len; struct hinic_cmd_update_fw *fw_update_msg = NULL; u32 section_type, section_crc, section_version; u32 i, len, section_len, section_offset; u16 out_size = sizeof(*fw_update_msg); int total_len_flag = 0; int err; fw_update_msg = kzalloc(sizeof(*fw_update_msg), GFP_KERNEL); if (!fw_update_msg) return -ENOMEM; up_total_len = host_image->image_info.up_total_len; for (i = 0; i < host_image->section_type_num; i++) { len = host_image->image_section_info[i].fw_section_len; if (host_image->image_section_info[i].fw_section_type == UP_FW_UPDATE_BOOT) { up_total_len = up_total_len - len; break; } } for (i = 0; i < host_image->section_type_num; i++) { section_len = host_image->image_section_info[i].fw_section_len; section_offset = host_image->image_section_info[i].fw_section_offset; section_remain_send_len = section_len; section_type = host_image->image_section_info[i].fw_section_type; section_crc = host_image->image_section_info[i].fw_section_crc; section_version = host_image->image_section_info[i].fw_section_version; if (section_type == UP_FW_UPDATE_BOOT) continue; send_fragment_len = 0; send_pos = 0; while (section_remain_send_len > 0) { if (!total_len_flag) { fw_update_msg->total_len = up_total_len; total_len_flag = 1; } else { fw_update_msg->total_len = 0; } memset(fw_update_msg->data, 0, MAX_FW_FRAGMENT_LEN); fw_update_msg->ctl_info.SF = (section_remain_send_len == section_len) ? true : false; fw_update_msg->section_info.FW_section_CRC = section_crc; fw_update_msg->fw_section_version = section_version; fw_update_msg->ctl_info.flag = UP_TYPE_A; if (section_type <= UP_FW_UPDATE_UP_DATA_B) { fw_update_msg->section_info.FW_section_type = (section_type % 2) ? UP_FW_UPDATE_UP_DATA : UP_FW_UPDATE_UP_TEXT; fw_update_msg->ctl_info.flag = UP_TYPE_B; if (section_type <= UP_FW_UPDATE_UP_DATA_A) fw_update_msg->ctl_info.flag = UP_TYPE_A; } else { fw_update_msg->section_info.FW_section_type = section_type - 0x2; } fw_update_msg->setion_total_len = section_len; fw_update_msg->section_offset = send_pos; if (section_remain_send_len <= MAX_FW_FRAGMENT_LEN) { fw_update_msg->ctl_info.SL = true; fw_update_msg->ctl_info.fragment_len = section_remain_send_len; send_fragment_len += section_remain_send_len; } else { fw_update_msg->ctl_info.SL = false; fw_update_msg->ctl_info.fragment_len = MAX_FW_FRAGMENT_LEN; send_fragment_len += MAX_FW_FRAGMENT_LEN; } memcpy(fw_update_msg->data, data + UPDATEFW_IMAGE_HEAD_SIZE + section_offset + send_pos, fw_update_msg->ctl_info.fragment_len); err = hinic_port_msg_cmd(priv->hwdev, HINIC_PORT_CMD_UPDATE_FW, fw_update_msg, sizeof(*fw_update_msg), fw_update_msg, &out_size); if (err || !out_size || fw_update_msg->status) { dev_err(&priv->hwdev->hwif->pdev->dev, "Failed to update firmware, err: %d, status: 0x%x, out size: 0x%x\n", err, fw_update_msg->status, out_size); err = fw_update_msg->status ? 
fw_update_msg->status : -EIO; kfree(fw_update_msg); return err; } send_pos = send_fragment_len; section_remain_send_len = section_len - send_fragment_len; } } kfree(fw_update_msg); return 0; } static int hinic_firmware_update(struct hinic_devlink_priv *priv, const struct firmware *fw, struct netlink_ext_ack *extack) { struct host_image_st host_image; int err; memset(&host_image, 0, sizeof(struct host_image_st)); if (!check_image_valid(priv, fw->data, fw->size, &host_image) || !check_image_integrity(priv, &host_image, FW_UPDATE_COLD) || !check_image_device_type(priv, host_image.device_id)) { NL_SET_ERR_MSG_MOD(extack, "Check image failed"); return -EINVAL; } dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware begin\n"); err = hinic_flash_fw(priv, fw->data, &host_image); if (err) { if (err == HINIC_FW_DISMATCH_ERROR) { dev_err(&priv->hwdev->hwif->pdev->dev, "Firmware image doesn't match this card, please use newer image, err: %d\n", err); NL_SET_ERR_MSG_MOD(extack, "Firmware image doesn't match this card, please use newer image"); } else { dev_err(&priv->hwdev->hwif->pdev->dev, "Send firmware image data failed, err: %d\n", err); NL_SET_ERR_MSG_MOD(extack, "Send firmware image data failed"); } return err; } dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware end\n"); return 0; } static int hinic_devlink_flash_update(struct devlink *devlink, struct devlink_flash_update_params *params, struct netlink_ext_ack *extack) { struct hinic_devlink_priv *priv = devlink_priv(devlink); return hinic_firmware_update(priv, params->fw, extack); } static const struct devlink_ops hinic_devlink_ops = { .flash_update = hinic_devlink_flash_update, }; struct devlink *hinic_devlink_alloc(struct device *dev) { return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev), dev); } void hinic_devlink_free(struct devlink *devlink) { devlink_free(devlink); } void hinic_devlink_register(struct hinic_devlink_priv *priv) { struct devlink *devlink = priv_to_devlink(priv); devlink_register(devlink); } void hinic_devlink_unregister(struct hinic_devlink_priv *priv) { struct devlink *devlink = priv_to_devlink(priv); devlink_unregister(devlink); } static int chip_fault_show(struct devlink_fmsg *fmsg, struct hinic_fault_event *event) { const char * const level_str[FAULT_LEVEL_MAX + 1] = { "fatal", "reset", "flr", "general", "suggestion", "Unknown"}; u8 fault_level; int err; fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ? event->event.chip.err_level : FAULT_LEVEL_MAX; if (fault_level == FAULT_LEVEL_SERIOUS_FLR) { err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id", (u32)event->event.chip.func_id); if (err) return err; } err = devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type); if (err) return err; err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr", event->event.chip.err_csr_addr); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_value", event->event.chip.err_csr_value); if (err) return err; return 0; } static int fault_report_show(struct devlink_fmsg *fmsg, struct hinic_fault_event *event) { const char * const type_str[FAULT_TYPE_MAX + 1] = { "chip", "ucode", "mem rd timeout", "mem wr timeout", "reg rd timeout", "reg wr timeout", "phy fault", "Unknown"}; u8 fault_type; int err; fault_type = (event->type < FAULT_TYPE_MAX) ? 
event->type : FAULT_TYPE_MAX; err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]); if (err) return err; err = devlink_fmsg_binary_pair_put(fmsg, "Fault raw data", event->event.val, sizeof(event->event.val)); if (err) return err; switch (event->type) { case FAULT_TYPE_CHIP: err = chip_fault_show(fmsg, event); if (err) return err; break; case FAULT_TYPE_UCODE: err = devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc); if (err) return err; break; case FAULT_TYPE_MEM_RD_TIMEOUT: case FAULT_TYPE_MEM_WR_TIMEOUT: err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl", event->event.mem_timeout.err_csr_ctrl); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_data", event->event.mem_timeout.err_csr_data); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab", event->event.mem_timeout.ctrl_tab); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "mem_index", event->event.mem_timeout.mem_index); if (err) return err; break; case FAULT_TYPE_REG_RD_TIMEOUT: case FAULT_TYPE_REG_WR_TIMEOUT: err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr); if (err) return err; break; case FAULT_TYPE_PHY_FAULT: err = devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id); if (err) return err; err = devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data); if (err) return err; break; default: break; } return 0; } static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *priv_ctx, struct netlink_ext_ack *extack) { if (priv_ctx) return fault_report_show(fmsg, priv_ctx); return 0; } static int mgmt_watchdog_report_show(struct devlink_fmsg *fmsg, struct hinic_mgmt_watchdog_info *watchdog_info) { int err; err = devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", watchdog_info->curr_time_h); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "time_l", watchdog_info->curr_time_l); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "task_id", watchdog_info->task_id); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "sp", watchdog_info->sp); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", watchdog_info->curr_used); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "peak_used", watchdog_info->peak_used); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", watchdog_info->is_overflow); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "stack_top", watchdog_info->stack_top); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", watchdog_info->stack_bottom); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", watchdog_info->pc); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "lr", watchdog_info->lr); if (err) return err; err = devlink_fmsg_u32_pair_put(fmsg, "cpsr", watchdog_info->cpsr); if (err) return 
err; err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info", watchdog_info->reg, sizeof(watchdog_info->reg)); if (err) return err; err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)", watchdog_info->data, sizeof(watchdog_info->data)); if (err) return err; return 0; } static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *priv_ctx, struct netlink_ext_ack *extack) { if (priv_ctx) return mgmt_watchdog_report_show(fmsg, priv_ctx); return 0; } static const struct devlink_health_reporter_ops hinic_hw_fault_reporter_ops = { .name = "hw", .dump = hinic_hw_reporter_dump, }; static const struct devlink_health_reporter_ops hinic_fw_fault_reporter_ops = { .name = "fw", .dump = hinic_fw_reporter_dump, }; int hinic_health_reporters_create(struct hinic_devlink_priv *priv) { struct devlink *devlink = priv_to_devlink(priv); priv->hw_fault_reporter = devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops, 0, priv); if (IS_ERR(priv->hw_fault_reporter)) { dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n", PTR_ERR(priv->hw_fault_reporter)); return PTR_ERR(priv->hw_fault_reporter); } priv->fw_fault_reporter = devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops, 0, priv); if (IS_ERR(priv->fw_fault_reporter)) { dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n", PTR_ERR(priv->fw_fault_reporter)); devlink_health_reporter_destroy(priv->hw_fault_reporter); priv->hw_fault_reporter = NULL; return PTR_ERR(priv->fw_fault_reporter); } return 0; } void hinic_health_reporters_destroy(struct hinic_devlink_priv *priv) { if (!IS_ERR_OR_NULL(priv->fw_fault_reporter)) { devlink_health_reporter_destroy(priv->fw_fault_reporter); priv->fw_fault_reporter = NULL; } if (!IS_ERR_OR_NULL(priv->hw_fault_reporter)) { devlink_health_reporter_destroy(priv->hw_fault_reporter); priv->hw_fault_reporter = NULL; } }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_devlink.c
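Illustrative sketch (not part of hinic_devlink.c above; the section values below are made up): check_image_integrity() rejects firmware images that carry the same section type twice by folding each type into a one-bit-per-type mask. The standalone program below demonstrates that duplicate-detection idiom and, like the driver, assumes section type numbers are small enough to index a 32-bit mask.

/* Hypothetical standalone demo of the collect_section_type idiom. */
#include <stdio.h>

static int sections_are_unique(const unsigned int *types, unsigned int count)
{
	unsigned int collected = 0;	/* one bit per already-seen section type */
	unsigned int i;

	for (i = 0; i < count; i++) {
		unsigned int bit = 1U << types[i];

		if (collected & bit) {
			printf("duplicate section type: %u\n", types[i]);
			return 0;
		}
		collected |= bit;
	}
	return 1;
}

int main(void)
{
	unsigned int ok[]  = { 0, 1, 4, 7 };	/* made-up section types */
	unsigned int bad[] = { 0, 1, 4, 1 };	/* type 1 appears twice */

	printf("ok image:  %s\n",
	       sections_are_unique(ok, sizeof(ok) / sizeof(ok[0])) ? "valid" : "invalid");
	printf("bad image: %s\n",
	       sections_are_unique(bad, sizeof(bad) / sizeof(bad[0])) ? "valid" : "invalid");
	return 0;
}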
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/u64_stats_sync.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/skbuff.h> #include <linux/smp.h> #include <asm/byteorder.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/sctp.h> #include <linux/ipv6.h> #include <net/ipv6.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include "hinic_common.h" #include "hinic_hw_if.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_qp.h" #include "hinic_hw_dev.h" #include "hinic_dev.h" #include "hinic_tx.h" #define TX_IRQ_NO_PENDING 0 #define TX_IRQ_NO_COALESC 0 #define TX_IRQ_NO_LLI_TIMER 0 #define TX_IRQ_NO_CREDIT 0 #define TX_IRQ_NO_RESEND_TIMER 0 #define CI_UPDATE_NO_PENDING 0 #define CI_UPDATE_NO_COALESC 0 #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) #define MIN_SKB_LEN 32 #define MAX_PAYLOAD_OFFSET 221 #define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) union hinic_l3 { struct iphdr *v4; struct ipv6hdr *v6; unsigned char *hdr; }; union hinic_l4 { struct tcphdr *tcp; struct udphdr *udp; unsigned char *hdr; }; enum hinic_offload_type { TX_OFFLOAD_TSO = BIT(0), TX_OFFLOAD_CSUM = BIT(1), TX_OFFLOAD_VLAN = BIT(2), TX_OFFLOAD_INVALID = BIT(3), }; /** * hinic_txq_clean_stats - Clean the statistics of specific queue * @txq: Logical Tx Queue **/ static void hinic_txq_clean_stats(struct hinic_txq *txq) { struct hinic_txq_stats *txq_stats = &txq->txq_stats; u64_stats_update_begin(&txq_stats->syncp); txq_stats->pkts = 0; txq_stats->bytes = 0; txq_stats->tx_busy = 0; txq_stats->tx_wake = 0; txq_stats->tx_dropped = 0; txq_stats->big_frags_pkts = 0; u64_stats_update_end(&txq_stats->syncp); } /** * hinic_txq_get_stats - get statistics of Tx Queue * @txq: Logical Tx Queue * @stats: return updated stats here **/ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) { struct hinic_txq_stats *txq_stats = &txq->txq_stats; unsigned int start; do { start = u64_stats_fetch_begin(&txq_stats->syncp); stats->pkts = txq_stats->pkts; stats->bytes = txq_stats->bytes; stats->tx_busy = txq_stats->tx_busy; stats->tx_wake = txq_stats->tx_wake; stats->tx_dropped = txq_stats->tx_dropped; stats->big_frags_pkts = txq_stats->big_frags_pkts; } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); } /** * txq_stats_init - Initialize the statistics of specific queue * @txq: Logical Tx Queue **/ static void txq_stats_init(struct hinic_txq *txq) { struct hinic_txq_stats *txq_stats = &txq->txq_stats; u64_stats_init(&txq_stats->syncp); hinic_txq_clean_stats(txq); } /** * tx_map_skb - dma mapping for skb and return sges * @nic_dev: nic device * @skb: the skb * @sges: returned sges * * Return 0 - Success, negative - Failure **/ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, struct hinic_sge *sges) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; skb_frag_t *frag; dma_addr_t dma_addr; int i, j; dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, dma_addr)) { dev_err(&pdev->dev, "Failed to map Tx skb data\n"); return -EFAULT; } hinic_set_sge(&sges[0], 
dma_addr, skb_headlen(skb)); for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, dma_addr)) { dev_err(&pdev->dev, "Failed to map Tx skb frag\n"); goto err_tx_map; } hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag)); } return 0; err_tx_map: for (j = 0; j < i; j++) dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]), sges[j + 1].len, DMA_TO_DEVICE); dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len, DMA_TO_DEVICE); return -EFAULT; } /** * tx_unmap_skb - unmap the dma address of the skb * @nic_dev: nic device * @skb: the skb * @sges: the sges that are connected to the skb **/ static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, struct hinic_sge *sges) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; int i; for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++) dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]), sges[i + 1].len, DMA_TO_DEVICE); dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len, DMA_TO_DEVICE); } static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip, union hinic_l4 *l4, enum hinic_offload_type offload_type, enum hinic_l3_offload_type *l3_type, u8 *l4_proto) { u8 *exthdr; if (ip->v4->version == 4) { *l3_type = (offload_type == TX_OFFLOAD_CSUM) ? IPV4_PKT_NO_CHKSUM_OFFLOAD : IPV4_PKT_WITH_CHKSUM_OFFLOAD; *l4_proto = ip->v4->protocol; } else if (ip->v4->version == 6) { *l3_type = IPV6_PKT; exthdr = ip->hdr + sizeof(*ip->v6); *l4_proto = ip->v6->nexthdr; if (exthdr != l4->hdr) { int start = exthdr - skb->data; __be16 frag_off; ipv6_skip_exthdr(skb, start, l4_proto, &frag_off); } } else { *l3_type = L3TYPE_UNKNOWN; *l4_proto = 0; } } static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4, enum hinic_offload_type offload_type, u8 l4_proto, enum hinic_l4_offload_type *l4_offload, u32 *l4_len, u32 *offset) { *l4_offload = OFFLOAD_DISABLE; *offset = 0; *l4_len = 0; switch (l4_proto) { case IPPROTO_TCP: *l4_offload = TCP_OFFLOAD_ENABLE; /* doff in unit of 4B */ *l4_len = l4->tcp->doff * 4; *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb); break; case IPPROTO_UDP: *l4_offload = UDP_OFFLOAD_ENABLE; *l4_len = sizeof(struct udphdr); *offset = TRANSPORT_OFFSET(l4->hdr, skb); break; case IPPROTO_SCTP: /* only csum offload support sctp */ if (offload_type != TX_OFFLOAD_CSUM) break; *l4_offload = SCTP_OFFLOAD_ENABLE; *l4_len = sizeof(struct sctphdr); *offset = TRANSPORT_OFFSET(l4->hdr, skb); break; default: break; } } static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto) { return (ip->v4->version == 4) ? 
csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); } static int offload_tso(struct hinic_sq_task *task, u32 *queue_info, struct sk_buff *skb) { u32 offset, l4_len, ip_identify, network_hdr_len; enum hinic_l3_offload_type l3_offload; enum hinic_l4_offload_type l4_offload; union hinic_l3 ip; union hinic_l4 l4; u8 l4_proto; if (!skb_is_gso(skb)) return 0; if (skb_cow_head(skb, 0) < 0) return -EPROTONOSUPPORT; if (skb->encapsulation) { u32 gso_type = skb_shinfo(skb)->gso_type; u32 tunnel_type = 0; u32 l4_tunnel_len; ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); network_hdr_len = skb_inner_network_header_len(skb); if (ip.v4->version == 4) { ip.v4->tot_len = 0; l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD; } else if (ip.v4->version == 6) { l3_offload = IPV6_PKT; } else { l3_offload = 0; } hinic_task_set_outter_l3(task, l3_offload, skb_network_header_len(skb)); if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); tunnel_type = TUNNEL_UDP_CSUM; } else if (gso_type & SKB_GSO_UDP_TUNNEL) { tunnel_type = TUNNEL_UDP_NO_CSUM; } l4_tunnel_len = skb_inner_network_offset(skb) - skb_transport_offset(skb); hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len); ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); } else { ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); network_hdr_len = skb_network_header_len(skb); } /* initialize inner IP header fields */ if (ip.v4->version == 4) ip.v4->tot_len = 0; else ip.v6->payload_len = 0; get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload, &l4_proto); hinic_task_set_inner_l3(task, l3_offload, network_hdr_len); ip_identify = 0; if (l4_proto == IPPROTO_TCP) l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload, &l4_len, &offset); hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset, ip_identify, skb_shinfo(skb)->gso_size); return 1; } static int offload_csum(struct hinic_sq_task *task, u32 *queue_info, struct sk_buff *skb) { enum hinic_l4_offload_type l4_offload; u32 offset, l4_len, network_hdr_len; enum hinic_l3_offload_type l3_type; u32 tunnel_type = NOT_TUNNEL; union hinic_l3 ip; union hinic_l4 l4; u8 l4_proto; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (skb->encapsulation) { u32 l4_tunnel_len; tunnel_type = TUNNEL_UDP_NO_CSUM; ip.hdr = skb_network_header(skb); if (ip.v4->version == 4) { l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD; l4_proto = ip.v4->protocol; } else if (ip.v4->version == 6) { unsigned char *exthdr; __be16 frag_off; l3_type = IPV6_PKT; tunnel_type = TUNNEL_UDP_CSUM; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; l4.hdr = skb_transport_header(skb); if (l4.hdr != exthdr) ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); } else { l3_type = L3TYPE_UNKNOWN; l4_proto = IPPROTO_RAW; } hinic_task_set_outter_l3(task, l3_type, skb_network_header_len(skb)); switch (l4_proto) { case IPPROTO_UDP: l4_tunnel_len = skb_inner_network_offset(skb) - skb_transport_offset(skb); ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); network_hdr_len = skb_inner_network_header_len(skb); break; case IPPROTO_IPIP: case IPPROTO_IPV6: tunnel_type = NOT_TUNNEL; l4_tunnel_len = 0; ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_transport_header(skb); network_hdr_len = skb_network_header_len(skb); break; default: /* Unsupported tunnel packet, disable 
csum offload */ skb_checksum_help(skb); return 0; } hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len); } else { ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); network_hdr_len = skb_network_header_len(skb); } get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type, &l4_proto); hinic_task_set_inner_l3(task, l3_type, network_hdr_len); get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload, &l4_len, &offset); hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset); return 1; } static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info, u16 vlan_tag, u16 vlan_pri) { task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) | HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD); *queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI); } static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task, u32 *queue_info) { enum hinic_offload_type offload = 0; u16 vlan_tag; int enabled; enabled = offload_tso(task, queue_info, skb); if (enabled > 0) { offload |= TX_OFFLOAD_TSO; } else if (enabled == 0) { enabled = offload_csum(task, queue_info, skb); if (enabled) offload |= TX_OFFLOAD_CSUM; } else { return -EPROTONOSUPPORT; } if (unlikely(skb_vlan_tag_present(skb))) { vlan_tag = skb_vlan_tag_get(skb); offload_vlan(task, queue_info, vlan_tag, vlan_tag >> VLAN_PRIO_SHIFT); offload |= TX_OFFLOAD_VLAN; } if (offload) hinic_task_set_l2hdr(task, skb_network_offset(skb)); /* payload offset should not more than 221 */ if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) > MAX_PAYLOAD_OFFSET) { return -EPROTONOSUPPORT; } /* mss should not less than 80 */ if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) { *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS); *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS); } return 0; } netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); u16 prod_idx, q_id = skb->queue_mapping; struct netdev_queue *netdev_txq; int nr_sges, err = NETDEV_TX_OK; struct hinic_sq_wqe *sq_wqe; unsigned int wqe_size; struct hinic_txq *txq; struct hinic_qp *qp; txq = &nic_dev->txqs[q_id]; qp = container_of(txq->sq, struct hinic_qp, sq); nr_sges = skb_shinfo(skb)->nr_frags + 1; err = tx_map_skb(nic_dev, skb, txq->sges); if (err) goto skb_error; wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); if (!sq_wqe) { netif_stop_subqueue(netdev, qp->q_id); sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); if (sq_wqe) { netif_wake_subqueue(nic_dev->netdev, qp->q_id); goto process_sq_wqe; } tx_unmap_skb(nic_dev, skb, txq->sges); u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.tx_busy++; u64_stats_update_end(&txq->txq_stats.syncp); err = NETDEV_TX_BUSY; wqe_size = 0; goto flush_skbs; } process_sq_wqe: hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges); hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); flush_skbs: netdev_txq = netdev_get_tx_queue(netdev, q_id); if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq))) hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); return err; skb_error: dev_kfree_skb_any(skb); u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.tx_dropped++; u64_stats_update_end(&txq->txq_stats.syncp); return NETDEV_TX_OK; } netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); u16 prod_idx, q_id = skb->queue_mapping; 
struct netdev_queue *netdev_txq; int nr_sges, err = NETDEV_TX_OK; struct hinic_sq_wqe *sq_wqe; unsigned int wqe_size; struct hinic_txq *txq; struct hinic_qp *qp; txq = &nic_dev->txqs[q_id]; qp = container_of(txq->sq, struct hinic_qp, sq); if (skb->len < MIN_SKB_LEN) { if (skb_pad(skb, MIN_SKB_LEN - skb->len)) { netdev_err(netdev, "Failed to pad skb\n"); goto update_error_stats; } skb->len = MIN_SKB_LEN; } nr_sges = skb_shinfo(skb)->nr_frags + 1; if (nr_sges > 17) { u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.big_frags_pkts++; u64_stats_update_end(&txq->txq_stats.syncp); } if (nr_sges > txq->max_sges) { netdev_err(netdev, "Too many Tx sges\n"); goto skb_error; } err = tx_map_skb(nic_dev, skb, txq->sges); if (err) goto skb_error; wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); if (!sq_wqe) { netif_stop_subqueue(netdev, qp->q_id); /* Check for the case free_tx_poll is called in another cpu * and we stopped the subqueue after free_tx_poll check. */ sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); if (sq_wqe) { netif_wake_subqueue(nic_dev->netdev, qp->q_id); goto process_sq_wqe; } tx_unmap_skb(nic_dev, skb, txq->sges); u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.tx_busy++; u64_stats_update_end(&txq->txq_stats.syncp); err = NETDEV_TX_BUSY; wqe_size = 0; goto flush_skbs; } process_sq_wqe: hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges); err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info); if (err) goto offload_error; hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); flush_skbs: netdev_txq = netdev_get_tx_queue(netdev, q_id); if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq))) hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); return err; offload_error: hinic_sq_return_wqe(txq->sq, wqe_size); tx_unmap_skb(nic_dev, skb, txq->sges); skb_error: dev_kfree_skb_any(skb); update_error_stats: u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.tx_dropped++; u64_stats_update_end(&txq->txq_stats.syncp); return NETDEV_TX_OK; } /** * tx_free_skb - unmap and free skb * @nic_dev: nic device * @skb: the skb * @sges: the sges that are connected to the skb **/ static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, struct hinic_sge *sges) { tx_unmap_skb(nic_dev, skb, sges); dev_kfree_skb_any(skb); } /** * free_all_tx_skbs - free all skbs in tx queue * @txq: tx queue **/ static void free_all_tx_skbs(struct hinic_txq *txq) { struct hinic_dev *nic_dev = netdev_priv(txq->netdev); struct hinic_sq *sq = txq->sq; struct hinic_sq_wqe *sq_wqe; unsigned int wqe_size; struct sk_buff *skb; int nr_sges; u16 ci; while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) { sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci); if (!sq_wqe) break; nr_sges = skb_shinfo(skb)->nr_frags + 1; hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); hinic_sq_put_wqe(sq, wqe_size); tx_free_skb(nic_dev, skb, txq->free_sges); } } /** * free_tx_poll - free finished tx skbs in tx queue that connected to napi * @napi: napi * @budget: number of tx * * Return 0 - Success, negative - Failure **/ static int free_tx_poll(struct napi_struct *napi, int budget) { struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi); struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq); struct hinic_dev *nic_dev = netdev_priv(txq->netdev); struct netdev_queue *netdev_txq; struct hinic_sq *sq = txq->sq; struct hinic_wq *wq = sq->wq; struct hinic_sq_wqe *sq_wqe; unsigned int 
wqe_size; int nr_sges, pkts = 0; struct sk_buff *skb; u64 tx_bytes = 0; u16 hw_ci, sw_ci; do { hw_ci = HW_CONS_IDX(sq) & wq->mask; dma_rmb(); /* Reading a WQEBB to get real WQE size and consumer index. */ sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci); if (!sq_wqe || (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size)) break; /* If this WQE have multiple WQEBBs, we will read again to get * full size WQE. */ if (wqe_size > wq->wqebb_size) { sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci); if (unlikely(!sq_wqe)) break; } tx_bytes += skb->len; pkts++; nr_sges = skb_shinfo(skb)->nr_frags + 1; hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); hinic_sq_put_wqe(sq, wqe_size); tx_free_skb(nic_dev, skb, txq->free_sges); } while (pkts < budget); if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) && hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) { netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id); __netif_tx_lock(netdev_txq, smp_processor_id()); if (!netif_testing(nic_dev->netdev)) netif_wake_subqueue(nic_dev->netdev, qp->q_id); __netif_tx_unlock(netdev_txq); u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.tx_wake++; u64_stats_update_end(&txq->txq_stats.syncp); } u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.bytes += tx_bytes; txq->txq_stats.pkts += pkts; u64_stats_update_end(&txq->txq_stats.syncp); if (pkts < budget) { napi_complete(napi); if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) hinic_hwdev_set_msix_state(nic_dev->hwdev, sq->msix_entry, HINIC_MSIX_ENABLE); return pkts; } return budget; } static irqreturn_t tx_irq(int irq, void *data) { struct hinic_txq *txq = data; struct hinic_dev *nic_dev; nic_dev = netdev_priv(txq->netdev); if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) /* Disable the interrupt until napi will be completed */ hinic_hwdev_set_msix_state(nic_dev->hwdev, txq->sq->msix_entry, HINIC_MSIX_DISABLE); hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry); napi_schedule(&txq->napi); return IRQ_HANDLED; } static int tx_request_irq(struct hinic_txq *txq) { struct hinic_dev *nic_dev = netdev_priv(txq->netdev); struct hinic_msix_config interrupt_info = {0}; struct hinic_intr_coal_info *intr_coal = NULL; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_sq *sq = txq->sq; struct hinic_qp *qp; int err; qp = container_of(sq, struct hinic_qp, sq); netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight); hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry, TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC, TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT, TX_IRQ_NO_RESEND_TIMER); intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id]; interrupt_info.msix_index = sq->msix_entry; interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg; interrupt_info.pending_cnt = intr_coal->pending_limt; interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg; err = hinic_set_interrupt_cfg(hwdev, &interrupt_info); if (err) { netif_err(nic_dev, drv, txq->netdev, "Failed to set TX interrupt coalescing attribute\n"); netif_napi_del(&txq->napi); return err; } err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq); if (err) { dev_err(&pdev->dev, "Failed to request Tx irq\n"); netif_napi_del(&txq->napi); return err; } return 0; } static void tx_free_irq(struct hinic_txq *txq) { struct hinic_sq *sq = txq->sq; free_irq(sq->irq, txq); netif_napi_del(&txq->napi); } /** * hinic_init_txq - Initialize the Tx Queue * @txq: Logical Tx Queue * @sq: Hardware 
Tx Queue to connect the Logical queue with * @netdev: network device to connect the Logical queue with * * Return 0 - Success, negative - Failure **/ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, struct net_device *netdev) { struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; int err, irqname_len; txq->netdev = netdev; txq->sq = sq; txq_stats_init(txq); txq->max_sges = HINIC_MAX_SQ_BUFDESCS; txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges, sizeof(*txq->sges), GFP_KERNEL); if (!txq->sges) return -ENOMEM; txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges, sizeof(*txq->free_sges), GFP_KERNEL); if (!txq->free_sges) { err = -ENOMEM; goto err_alloc_free_sges; } irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1; txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); if (!txq->irq_name) { err = -ENOMEM; goto err_alloc_irqname; } sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id); err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING, CI_UPDATE_NO_COALESC); if (err) goto err_hw_ci; err = tx_request_irq(txq); if (err) { netdev_err(netdev, "Failed to request Tx irq\n"); goto err_req_tx_irq; } return 0; err_req_tx_irq: err_hw_ci: devm_kfree(&netdev->dev, txq->irq_name); err_alloc_irqname: devm_kfree(&netdev->dev, txq->free_sges); err_alloc_free_sges: devm_kfree(&netdev->dev, txq->sges); return err; } /** * hinic_clean_txq - Clean the Tx Queue * @txq: Logical Tx Queue **/ void hinic_clean_txq(struct hinic_txq *txq) { struct net_device *netdev = txq->netdev; tx_free_irq(txq); free_all_tx_skbs(txq); devm_kfree(&netdev->dev, txq->irq_name); devm_kfree(&netdev->dev, txq->free_sges); devm_kfree(&netdev->dev, txq->sges); }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_tx.c
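Illustrative sketch (not part of hinic_tx.c above; the ring depth is made up): free_tx_poll() decides whether a whole WQE has completed by comparing ((hw_ci - sw_ci) & wq->mask) with the WQE size in WQEBBs. The small program below shows why the masked difference of the two 16-bit indices counts outstanding entries correctly on a power-of-two ring, even after the hardware consumer index wraps past zero.

/* Hypothetical demo of masked consumer-index arithmetic on a ring. */
#include <stdio.h>

#define RING_DEPTH 256U			/* assumed depth; must be a power of two */
#define RING_MASK  (RING_DEPTH - 1)

static unsigned int completed_wqebbs(unsigned short hw_ci, unsigned short sw_ci)
{
	/* Masked subtraction gives the distance even across wraparound. */
	return (hw_ci - sw_ci) & RING_MASK;
}

int main(void)
{
	/* No wrap: hardware index is 3 entries ahead of software. */
	printf("%u\n", completed_wqebbs(10, 7));	/* prints 3 */
	/* Wrap: hw_ci restarted at 2 while sw_ci is still near the end. */
	printf("%u\n", completed_wqebbs(2, 250));	/* prints 8 */
	return 0;
}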
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/log2.h> #include <asm/byteorder.h> #include <asm/barrier.h> #include "hinic_hw_dev.h" #include "hinic_hw_csr.h" #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #define HINIC_EQS_WQ_NAME "hinic_eqs" #define GET_EQ_NUM_PAGES(eq, pg_size) \ (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size)) #define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size) #define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) #define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) #define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num)) #define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num)) #define GET_EQ_ELEMENT(eq, idx) \ ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \ (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) #define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \ GET_EQ_ELEMENT(eq, idx)) #define GET_CEQ_ELEM(eq, idx) ((u32 *) \ GET_EQ_ELEMENT(eq, idx)) #define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx) #define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx) #define PAGE_IN_4K(page_size) ((page_size) >> 12) #define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size))) #define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) #define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq))) #define EQ_MAX_PAGES 8 #define CEQE_TYPE_SHIFT 23 #define CEQE_TYPE_MASK 0x7 #define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \ CEQE_TYPE_MASK) #define CEQE_DATA_MASK 0x3FFFFFF #define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK) #define aeq_to_aeqs(eq) \ container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) #define ceq_to_ceqs(eq) \ container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0]) #define work_to_aeq_work(work) \ container_of(work, struct hinic_eq_work, work) #define DMA_ATTR_AEQ_DEFAULT 0 #define DMA_ATTR_CEQ_DEFAULT 0 /* No coalescence */ #define THRESH_CEQ_DEFAULT 0 enum eq_int_mode { EQ_INT_MODE_ARMED, EQ_INT_MODE_ALWAYS }; enum eq_arm_state { EQ_NOT_ARMED, EQ_ARMED }; /** * hinic_aeq_register_hw_cb - register AEQ callback for specific event * @aeqs: pointer to Async eqs of the chip * @event: aeq event to register callback for it * @handle: private data will be used by the callback * @hwe_handler: callback function **/ void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, enum hinic_aeq_type event, void *handle, void (*hwe_handler)(void *handle, void *data, u8 size)) { struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; hwe_cb->hwe_handler = hwe_handler; hwe_cb->handle = handle; hwe_cb->hwe_state = HINIC_EQE_ENABLED; } /** * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event * @aeqs: pointer to Async eqs of the chip * @event: aeq event to unregister callback for it **/ void 
hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, enum hinic_aeq_type event) { struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED; while (hwe_cb->hwe_state & HINIC_EQE_RUNNING) schedule(); hwe_cb->hwe_handler = NULL; } /** * hinic_ceq_register_cb - register CEQ callback for specific event * @ceqs: pointer to Completion eqs part of the chip * @event: ceq event to register callback for it * @handle: private data will be used by the callback * @handler: callback function **/ void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, enum hinic_ceq_type event, void *handle, void (*handler)(void *handle, u32 ceqe_data)) { struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; ceq_cb->handler = handler; ceq_cb->handle = handle; ceq_cb->ceqe_state = HINIC_EQE_ENABLED; } /** * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event * @ceqs: pointer to Completion eqs part of the chip * @event: ceq event to unregister callback for it **/ void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, enum hinic_ceq_type event) { struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED; while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING) schedule(); ceq_cb->handler = NULL; } static u8 eq_cons_idx_checksum_set(u32 val) { u8 checksum = 0; int idx; for (idx = 0; idx < 32; idx += 4) checksum ^= ((val >> idx) & 0xF); return (checksum & 0xF); } /** * eq_update_ci - update the HW cons idx of event queue * @eq: the event queue to update the cons idx for * @arm_state: the arm bit value of eq's interrupt **/ static void eq_update_ci(struct hinic_eq *eq, u32 arm_state) { u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq); /* Read Modify Write */ val = hinic_hwif_read_reg(eq->hwif, addr); val = HINIC_EQ_CI_CLEAR(val, IDX) & HINIC_EQ_CI_CLEAR(val, WRAPPED) & HINIC_EQ_CI_CLEAR(val, INT_ARMED) & HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM); val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) | HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) | HINIC_EQ_CI_SET(arm_state, INT_ARMED); val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); hinic_hwif_write_reg(eq->hwif, addr, val); } /** * aeq_irq_handler - handler for the AEQ event * @eq: the Async Event Queue that received the event **/ static void aeq_irq_handler(struct hinic_eq *eq) { struct hinic_aeqs *aeqs = aeq_to_aeqs(eq); struct hinic_hwif *hwif = aeqs->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_aeq_elem *aeqe_curr; struct hinic_hw_event_cb *hwe_cb; enum hinic_aeq_type event; unsigned long eqe_state; u32 aeqe_desc; int i, size; for (i = 0; i < eq->q_len; i++) { aeqe_curr = GET_CURR_AEQ_ELEM(eq); /* Data in HW is in Big endian Format */ aeqe_desc = be32_to_cpu(aeqe_curr->desc); /* HW toggles the wrapped bit, when it adds eq element */ if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) break; dma_rmb(); event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); if (event >= HINIC_MAX_AEQ_EVENTS) { dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event); return; } if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { hwe_cb = &aeqs->hwe_cb[event]; size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE); eqe_state = cmpxchg(&hwe_cb->hwe_state, HINIC_EQE_ENABLED, HINIC_EQE_ENABLED | HINIC_EQE_RUNNING); if (eqe_state == HINIC_EQE_ENABLED && hwe_cb->hwe_handler) hwe_cb->hwe_handler(hwe_cb->handle, aeqe_curr->data, size); else dev_err(&pdev->dev, "Unhandled AEQ Event %d\n", event); hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING; } eq->cons_idx++; if (eq->cons_idx == eq->q_len) { eq->cons_idx = 0; eq->wrapped = !eq->wrapped; } } } /** 
* ceq_event_handler - handler for the ceq events * @ceqs: ceqs part of the chip * @ceqe: ceq element that describes the event **/ static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe) { struct hinic_hwif *hwif = ceqs->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_ceq_cb *ceq_cb; enum hinic_ceq_type event; unsigned long eqe_state; event = CEQE_TYPE(ceqe); if (event >= HINIC_MAX_CEQ_EVENTS) { dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event); return; } ceq_cb = &ceqs->ceq_cb[event]; eqe_state = cmpxchg(&ceq_cb->ceqe_state, HINIC_EQE_ENABLED, HINIC_EQE_ENABLED | HINIC_EQE_RUNNING); if (eqe_state == HINIC_EQE_ENABLED && ceq_cb->handler) ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe)); else dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event); ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING; } /** * ceq_irq_handler - handler for the CEQ event * @eq: the Completion Event Queue that received the event **/ static void ceq_irq_handler(struct hinic_eq *eq) { struct hinic_ceqs *ceqs = ceq_to_ceqs(eq); u32 ceqe; int i; for (i = 0; i < eq->q_len; i++) { ceqe = *(GET_CURR_CEQ_ELEM(eq)); /* Data in HW is in Big endian Format */ ceqe = be32_to_cpu(ceqe); /* HW toggles the wrapped bit, when it adds eq element event */ if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) break; ceq_event_handler(ceqs, ceqe); eq->cons_idx++; if (eq->cons_idx == eq->q_len) { eq->cons_idx = 0; eq->wrapped = !eq->wrapped; } } } /** * eq_irq_handler - handler for the EQ event * @data: the Event Queue that received the event **/ static void eq_irq_handler(void *data) { struct hinic_eq *eq = data; if (eq->type == HINIC_AEQ) aeq_irq_handler(eq); else if (eq->type == HINIC_CEQ) ceq_irq_handler(eq); eq_update_ci(eq, EQ_ARMED); } /** * eq_irq_work - the work of the EQ that received the event * @work: the work struct that is associated with the EQ **/ static void eq_irq_work(struct work_struct *work) { struct hinic_eq_work *aeq_work = work_to_aeq_work(work); struct hinic_eq *aeq; aeq = aeq_work->data; eq_irq_handler(aeq); } /** * ceq_tasklet - the tasklet of the EQ that received the event * @t: the tasklet struct pointer **/ static void ceq_tasklet(struct tasklet_struct *t) { struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet); eq_irq_handler(ceq); } /** * aeq_interrupt - aeq interrupt handler * @irq: irq number * @data: the Async Event Queue that collected the event **/ static irqreturn_t aeq_interrupt(int irq, void *data) { struct hinic_eq_work *aeq_work; struct hinic_eq *aeq = data; struct hinic_aeqs *aeqs; /* clear resend timer cnt register */ hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry); aeq_work = &aeq->aeq_work; aeq_work->data = aeq; aeqs = aeq_to_aeqs(aeq); queue_work(aeqs->workq, &aeq_work->work); return IRQ_HANDLED; } /** * ceq_interrupt - ceq interrupt handler * @irq: irq number * @data: the Completion Event Queue that collected the event **/ static irqreturn_t ceq_interrupt(int irq, void *data) { struct hinic_eq *ceq = data; /* clear resend timer cnt register */ hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry); tasklet_schedule(&ceq->ceq_tasklet); return IRQ_HANDLED; } static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr) { struct msix_entry *msix_entry = &eq->msix_entry; enum hinic_eq_type type = eq->type; u32 val, ctrl0; if (type == HINIC_AEQ) { /* RMW Ctrl0 */ addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); val = hinic_hwif_read_reg(eq->hwif, addr); val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) & HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & HINIC_AEQ_CTRL_0_CLEAR(val, 
PCI_INTF_IDX) & HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE); ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) | HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) | HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), PCI_INTF_IDX) | HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE); val |= ctrl0; } else { /* RMW Ctrl0 */ addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); val = hinic_hwif_read_reg(eq->hwif, addr); val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) & HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) & HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) & HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE); ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) | HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) | HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) | HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), PCI_INTF_IDX) | HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE); val |= ctrl0; } return val; } static void set_ctrl0(struct hinic_eq *eq) { u32 val, addr; if (eq->type == HINIC_AEQ) addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); else addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); val = get_ctrl0_val(eq, addr); hinic_hwif_write_reg(eq->hwif, addr, val); } static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr) { u32 page_size_val, elem_size, val, ctrl1; enum hinic_eq_type type = eq->type; if (type == HINIC_AEQ) { /* RMW Ctrl1 */ addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); val = hinic_hwif_read_reg(eq->hwif, addr); val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) & HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) & HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE); ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) | HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); val |= ctrl1; } else { /* RMW Ctrl1 */ addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); val = hinic_hwif_read_reg(eq->hwif, addr); val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) & HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE); ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) | HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); val |= ctrl1; } return val; } static void set_ctrl1(struct hinic_eq *eq) { u32 addr, val; if (eq->type == HINIC_AEQ) addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); else addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); val = get_ctrl1_val(eq, addr); hinic_hwif_write_reg(eq->hwif, addr, val); } static int set_ceq_ctrl_reg(struct hinic_eq *eq) { struct hinic_ceq_ctrl_reg ceq_ctrl = {0}; struct hinic_hwdev *hwdev = eq->hwdev; u16 out_size = sizeof(ceq_ctrl); u16 in_size = sizeof(ceq_ctrl); struct hinic_pfhwdev *pfhwdev; u32 addr; int err; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr); addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr); ceq_ctrl.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); ceq_ctrl.q_id = eq->q_id; err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP, &ceq_ctrl, in_size, &ceq_ctrl, &out_size, HINIC_MGMT_MSG_SYNC); if (err || !out_size || ceq_ctrl.status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", eq->q_id, err, ceq_ctrl.status, out_size); return -EFAULT; } return 0; } /** * set_eq_ctrls - setting eq's ctrl registers * @eq: the Event Queue for setting **/ static int set_eq_ctrls(struct hinic_eq *eq) { if (HINIC_IS_VF(eq->hwif) 
&& eq->type == HINIC_CEQ) return set_ceq_ctrl_reg(eq); set_ctrl0(eq); set_ctrl1(eq); return 0; } /** * aeq_elements_init - initialize all the elements in the aeq * @eq: the Async Event Queue * @init_val: value to initialize the elements with it **/ static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) { struct hinic_aeq_elem *aeqe; int i; for (i = 0; i < eq->q_len; i++) { aeqe = GET_AEQ_ELEM(eq, i); aeqe->desc = cpu_to_be32(init_val); } wmb(); /* Write the initilzation values */ } /** * ceq_elements_init - Initialize all the elements in the ceq * @eq: the event queue * @init_val: value to init with it the elements **/ static void ceq_elements_init(struct hinic_eq *eq, u32 init_val) { u32 *ceqe; int i; for (i = 0; i < eq->q_len; i++) { ceqe = GET_CEQ_ELEM(eq, i); *(ceqe) = cpu_to_be32(init_val); } wmb(); /* Write the initilzation values */ } /** * alloc_eq_pages - allocate the pages for the queue * @eq: the event queue * * Return 0 - Success, Negative - Failure **/ static int alloc_eq_pages(struct hinic_eq *eq) { struct hinic_hwif *hwif = eq->hwif; struct pci_dev *pdev = hwif->pdev; u32 init_val, addr, val; int err, pg; eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages, sizeof(*eq->dma_addr), GFP_KERNEL); if (!eq->dma_addr) return -ENOMEM; eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages, sizeof(*eq->virt_addr), GFP_KERNEL); if (!eq->virt_addr) { err = -ENOMEM; goto err_virt_addr_alloc; } for (pg = 0; pg < eq->num_pages; pg++) { eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev, eq->page_size, &eq->dma_addr[pg], GFP_KERNEL); if (!eq->virt_addr[pg]) { err = -ENOMEM; goto err_dma_alloc; } addr = EQ_HI_PHYS_ADDR_REG(eq, pg); val = upper_32_bits(eq->dma_addr[pg]); hinic_hwif_write_reg(hwif, addr, val); addr = EQ_LO_PHYS_ADDR_REG(eq, pg); val = lower_32_bits(eq->dma_addr[pg]); hinic_hwif_write_reg(hwif, addr, val); } init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED); if (eq->type == HINIC_AEQ) aeq_elements_init(eq, init_val); else if (eq->type == HINIC_CEQ) ceq_elements_init(eq, init_val); return 0; err_dma_alloc: while (--pg >= 0) dma_free_coherent(&pdev->dev, eq->page_size, eq->virt_addr[pg], eq->dma_addr[pg]); devm_kfree(&pdev->dev, eq->virt_addr); err_virt_addr_alloc: devm_kfree(&pdev->dev, eq->dma_addr); return err; } /** * free_eq_pages - free the pages of the queue * @eq: the Event Queue **/ static void free_eq_pages(struct hinic_eq *eq) { struct hinic_hwif *hwif = eq->hwif; struct pci_dev *pdev = hwif->pdev; int pg; for (pg = 0; pg < eq->num_pages; pg++) dma_free_coherent(&pdev->dev, eq->page_size, eq->virt_addr[pg], eq->dma_addr[pg]); devm_kfree(&pdev->dev, eq->virt_addr); devm_kfree(&pdev->dev, eq->dma_addr); } /** * init_eq - initialize Event Queue * @eq: the event queue * @hwif: the HW interface of a PCI function device * @type: the type of the event queue, aeq or ceq * @q_id: Queue id number * @q_len: the number of EQ elements * @page_size: the page size of the pages in the event queue * @entry: msix entry associated with the event queue * * Return 0 - Success, Negative - Failure **/ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, struct msix_entry entry) { struct pci_dev *pdev = hwif->pdev; int err; eq->hwif = hwif; eq->type = type; eq->q_id = q_id; eq->q_len = q_len; eq->page_size = page_size; /* Clear PI and CI, also clear the ARM bit */ hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0); hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); eq->cons_idx = 0; 
eq->wrapped = 0; if (type == HINIC_AEQ) { eq->elem_size = HINIC_AEQE_SIZE; } else if (type == HINIC_CEQ) { eq->elem_size = HINIC_CEQE_SIZE; } else { dev_err(&pdev->dev, "Invalid EQ type\n"); return -EINVAL; } eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size); eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size); eq->msix_entry = entry; if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { dev_err(&pdev->dev, "num elements in eq page != power of 2\n"); return -EINVAL; } if (eq->num_pages > EQ_MAX_PAGES) { dev_err(&pdev->dev, "too many pages for eq\n"); return -EINVAL; } err = set_eq_ctrls(eq); if (err) { dev_err(&pdev->dev, "Failed to set eq ctrls\n"); return err; } eq_update_ci(eq, EQ_ARMED); err = alloc_eq_pages(eq); if (err) { dev_err(&pdev->dev, "Failed to allocate pages for eq\n"); return err; } if (type == HINIC_AEQ) { struct hinic_eq_work *aeq_work = &eq->aeq_work; INIT_WORK(&aeq_work->work, eq_irq_work); } else if (type == HINIC_CEQ) { tasklet_setup(&eq->ceq_tasklet, ceq_tasklet); } /* set the attributes of the msix entry */ hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry, HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT, HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT, HINIC_EQ_MSIX_LLI_TIMER_DEFAULT, HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT, HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT); if (type == HINIC_AEQ) { snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s", eq->q_id, pci_name(pdev)); err = request_irq(entry.vector, aeq_interrupt, 0, eq->irq_name, eq); } else if (type == HINIC_CEQ) { snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s", eq->q_id, pci_name(pdev)); err = request_irq(entry.vector, ceq_interrupt, 0, eq->irq_name, eq); } if (err) { dev_err(&pdev->dev, "Failed to request irq for the EQ\n"); goto err_req_irq; } return 0; err_req_irq: free_eq_pages(eq); return err; } /** * remove_eq - remove Event Queue * @eq: the event queue **/ static void remove_eq(struct hinic_eq *eq) { hinic_set_msix_state(eq->hwif, eq->msix_entry.entry, HINIC_MSIX_DISABLE); free_irq(eq->msix_entry.vector, eq); if (eq->type == HINIC_AEQ) { struct hinic_eq_work *aeq_work = &eq->aeq_work; cancel_work_sync(&aeq_work->work); /* clear aeq_len to avoid hw access host memory */ hinic_hwif_write_reg(eq->hwif, HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0); } else if (eq->type == HINIC_CEQ) { tasklet_kill(&eq->ceq_tasklet); /* clear ceq_len to avoid hw access host memory */ hinic_hwif_write_reg(eq->hwif, HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0); } /* update cons_idx to avoid invalid interrupt */ eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq)); eq_update_ci(eq, EQ_NOT_ARMED); free_eq_pages(eq); } /** * hinic_aeqs_init - initialize all the aeqs * @aeqs: pointer to Async eqs of the chip * @hwif: the HW interface of a PCI function device * @num_aeqs: number of AEQs * @q_len: number of EQ elements * @page_size: the page size of the pages in the event queue * @msix_entries: msix entries associated with the event queues * * Return 0 - Success, negative - Failure **/ int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, int num_aeqs, u32 q_len, u32 page_size, struct msix_entry *msix_entries) { struct pci_dev *pdev = hwif->pdev; int err, i, q_id; aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME); if (!aeqs->workq) return -ENOMEM; aeqs->hwif = hwif; aeqs->num_aeqs = num_aeqs; for (q_id = 0; q_id < num_aeqs; q_id++) { err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len, page_size, msix_entries[q_id]); if (err) { dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id); 
goto err_init_aeq; } } return 0; err_init_aeq: for (i = 0; i < q_id; i++) remove_eq(&aeqs->aeq[i]); destroy_workqueue(aeqs->workq); return err; } /** * hinic_aeqs_free - free all the aeqs * @aeqs: pointer to Async eqs of the chip **/ void hinic_aeqs_free(struct hinic_aeqs *aeqs) { int q_id; for (q_id = 0; q_id < aeqs->num_aeqs ; q_id++) remove_eq(&aeqs->aeq[q_id]); destroy_workqueue(aeqs->workq); } /** * hinic_ceqs_init - init all the ceqs * @ceqs: ceqs part of the chip * @hwif: the hardware interface of a pci function device * @num_ceqs: number of CEQs * @q_len: number of EQ elements * @page_size: the page size of the event queue * @msix_entries: msix entries associated with the event queues * * Return 0 - Success, Negative - Failure **/ int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, int num_ceqs, u32 q_len, u32 page_size, struct msix_entry *msix_entries) { struct pci_dev *pdev = hwif->pdev; int i, q_id, err; ceqs->hwif = hwif; ceqs->num_ceqs = num_ceqs; for (q_id = 0; q_id < num_ceqs; q_id++) { ceqs->ceq[q_id].hwdev = ceqs->hwdev; err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len, page_size, msix_entries[q_id]); if (err) { dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id); goto err_init_ceq; } } return 0; err_init_ceq: for (i = 0; i < q_id; i++) remove_eq(&ceqs->ceq[i]); return err; } /** * hinic_ceqs_free - free all the ceqs * @ceqs: ceqs part of the chip **/ void hinic_ceqs_free(struct hinic_ceqs *ceqs) { int q_id; for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) remove_eq(&ceqs->ceq[q_id]); } void hinic_dump_ceq_info(struct hinic_hwdev *hwdev) { struct hinic_eq *eq = NULL; u32 addr, ci, pi; int q_id; for (q_id = 0; q_id < hwdev->func_to_io.ceqs.num_ceqs; q_id++) { eq = &hwdev->func_to_io.ceqs.ceq[q_id]; addr = EQ_CONS_IDX_REG_ADDR(eq); ci = hinic_hwif_read_reg(hwdev->hwif, addr); addr = EQ_PROD_IDX_REG_ADDR(eq); pi = hinic_hwif_read_reg(hwdev->hwif, addr); dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n", q_id, ci, eq->cons_idx, pi, eq->ceq_tasklet.state, eq->wrapped, be32_to_cpu(*(__be32 *)(GET_CURR_CEQ_ELEM(eq)))); } } void hinic_dump_aeq_info(struct hinic_hwdev *hwdev) { struct hinic_aeq_elem *aeqe_pos = NULL; struct hinic_eq *eq = NULL; u32 addr, ci, pi; int q_id; for (q_id = 0; q_id < hwdev->aeqs.num_aeqs; q_id++) { eq = &hwdev->aeqs.aeq[q_id]; addr = EQ_CONS_IDX_REG_ADDR(eq); ci = hinic_hwif_read_reg(hwdev->hwif, addr); addr = EQ_PROD_IDX_REG_ADDR(eq); pi = hinic_hwif_read_reg(hwdev->hwif, addr); aeqe_pos = GET_CURR_AEQ_ELEM(eq); dev_err(&hwdev->hwif->pdev->dev, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n", q_id, ci, pi, work_busy(&eq->aeq_work.work), eq->wrapped, be32_to_cpu(aeqe_pos->desc)); } }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
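The hinic_hw_eqs.c row above ends with a paired init/free API for the asynchronous (AEQ) and completion (CEQ) event queues. The fragment below is a minimal caller sketch and is not part of the dumped file: example_setup_aeqs() is a hypothetical name, and the 64-element queue length and 4096-byte page size are placeholder values chosen only to satisfy the power-of-two and page-count checks in init_eq(); the real caller elsewhere in the driver supplies its own values.

#include <linux/pci.h>
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"

/* Hypothetical caller, for illustration only (not in the kernel tree). */
static int example_setup_aeqs(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
                              struct msix_entry *entries, int num_aeqs)
{
        int err;

        /* Placeholder q_len/page_size; both must keep elements-per-page a power of two. */
        err = hinic_aeqs_init(aeqs, hwif, num_aeqs, 64, 4096, entries);
        if (err)
                return err;

        /* ... the hardware can now post asynchronous events to the AEQs ... */

        hinic_aeqs_free(aeqs);  /* tears down every AEQ and the shared workqueue */
        return 0;
}

hinic_ceqs_init()/hinic_ceqs_free() follow the same pattern, minus the workqueue, since CEQ events are handled in a tasklet rather than a work item.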
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/semaphore.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/err.h> #include "hinic_hw_dev.h" #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_cmdq.h" #include "hinic_hw_qp_ctxt.h" #include "hinic_hw_qp.h" #include "hinic_hw_io.h" #define CI_Q_ADDR_SIZE sizeof(u32) #define CI_ADDR(base_addr, q_id) ((base_addr) + \ (q_id) * CI_Q_ADDR_SIZE) #define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE) #define DB_IDX(db, db_base) \ (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE) #define HINIC_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) enum io_cmd { IO_CMD_MODIFY_QUEUE_CTXT = 0, IO_CMD_CLEAN_QUEUE_CTXT, }; static void init_db_area_idx(struct hinic_free_db_area *free_db_area) { int i; for (i = 0; i < HINIC_DB_MAX_AREAS; i++) free_db_area->db_idx[i] = i; free_db_area->alloc_pos = 0; free_db_area->return_pos = HINIC_DB_MAX_AREAS; free_db_area->num_free = HINIC_DB_MAX_AREAS; sema_init(&free_db_area->idx_lock, 1); } static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io) { struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; int pos, idx; down(&free_db_area->idx_lock); free_db_area->num_free--; if (free_db_area->num_free < 0) { free_db_area->num_free++; up(&free_db_area->idx_lock); return ERR_PTR(-ENOMEM); } pos = free_db_area->alloc_pos++; pos &= HINIC_DB_MAX_AREAS - 1; idx = free_db_area->db_idx[pos]; free_db_area->db_idx[pos] = -1; up(&free_db_area->idx_lock); return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE; } static void return_db_area(struct hinic_func_to_io *func_to_io, void __iomem *db_base) { struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; int pos, idx = DB_IDX(db_base, func_to_io->db_base); down(&free_db_area->idx_lock); pos = free_db_area->return_pos++; pos &= HINIC_DB_MAX_AREAS - 1; free_db_area->db_idx[pos] = idx; free_db_area->num_free++; up(&free_db_area->idx_lock); } static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, u16 num_sqs) { struct hinic_hwif *hwif = func_to_io->hwif; struct hinic_sq_ctxt_block *sq_ctxt_block; struct pci_dev *pdev = hwif->pdev; struct hinic_cmdq_buf cmdq_buf; struct hinic_sq_ctxt *sq_ctxt; struct hinic_qp *qp; u64 out_param; int err, i; err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); if (err) { dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); return err; } sq_ctxt_block = cmdq_buf.buf; sq_ctxt = sq_ctxt_block->sq_ctxt; hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ, num_sqs, func_to_io->max_qps); for (i = 0; i < num_sqs; i++) { qp = &func_to_io->qps[i]; hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq, base_qpn + qp->q_id); } cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs); err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, &out_param); if (err || out_param != 0) { dev_err(&pdev->dev, "Failed to set SQ ctxts\n"); err = -EFAULT; } hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); return err; } static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, u16 num_rqs) { struct hinic_hwif *hwif = func_to_io->hwif; struct hinic_rq_ctxt_block *rq_ctxt_block; struct 
pci_dev *pdev = hwif->pdev; struct hinic_cmdq_buf cmdq_buf; struct hinic_rq_ctxt *rq_ctxt; struct hinic_qp *qp; u64 out_param; int err, i; err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); if (err) { dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); return err; } rq_ctxt_block = cmdq_buf.buf; rq_ctxt = rq_ctxt_block->rq_ctxt; hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ, num_rqs, func_to_io->max_qps); for (i = 0; i < num_rqs; i++) { qp = &func_to_io->qps[i]; hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq, base_qpn + qp->q_id); } cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs); err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, &out_param); if (err || out_param != 0) { dev_err(&pdev->dev, "Failed to set RQ ctxts\n"); err = -EFAULT; } hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); return err; } /** * write_qp_ctxts - write the qp ctxt to HW * @func_to_io: func to io channel that holds the IO components * @base_qpn: first qp number * @num_qps: number of qps to write * * Return 0 - Success, negative - Failure **/ static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, u16 num_qps) { return (write_sq_ctxts(func_to_io, base_qpn, num_qps) || write_rq_ctxts(func_to_io, base_qpn, num_qps)); } static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io, enum hinic_qp_ctxt_type ctxt_type) { struct hinic_hwif *hwif = func_to_io->hwif; struct hinic_clean_queue_ctxt *ctxt_block; struct pci_dev *pdev = hwif->pdev; struct hinic_cmdq_buf cmdq_buf; u64 out_param = 0; int err; err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); if (err) { dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); return err; } ctxt_block = cmdq_buf.buf; ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps; ctxt_block->cmdq_hdr.queue_type = ctxt_type; ctxt_block->cmdq_hdr.addr_offset = 0; /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */ ctxt_block->ctxt_size = 0x3; hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); cmdq_buf.size = sizeof(*ctxt_block); err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, IO_CMD_CLEAN_QUEUE_CTXT, &cmdq_buf, &out_param); if (err || out_param) { dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n", err, out_param); err = -EFAULT; } hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); return err; } static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io) { /* clean LRO/TSO context space */ return (hinic_clean_queue_offload_ctxt(func_to_io, HINIC_QP_CTXT_TYPE_SQ) || hinic_clean_queue_offload_ctxt(func_to_io, HINIC_QP_CTXT_TYPE_RQ)); } /** * init_qp - Initialize a Queue Pair * @func_to_io: func to io channel that holds the IO components * @qp: pointer to the qp to initialize * @q_id: the id of the qp * @sq_msix_entry: msix entry for sq * @rq_msix_entry: msix entry for rq * * Return 0 - Success, negative - Failure **/ static int init_qp(struct hinic_func_to_io *func_to_io, struct hinic_qp *qp, int q_id, struct msix_entry *sq_msix_entry, struct msix_entry *rq_msix_entry) { struct hinic_hwif *hwif = func_to_io->hwif; struct pci_dev *pdev = hwif->pdev; void __iomem *db_base; int err; qp->q_id = q_id; err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id], HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE, func_to_io->sq_depth, HINIC_SQ_WQE_MAX_SIZE); if (err) { dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n"); return err; } err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id], 
HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE, func_to_io->rq_depth, HINIC_RQ_WQE_SIZE); if (err) { dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n"); goto err_rq_alloc; } db_base = get_db_area(func_to_io); if (IS_ERR(db_base)) { dev_err(&pdev->dev, "Failed to get DB area for SQ\n"); err = PTR_ERR(db_base); goto err_get_db; } func_to_io->sq_db[q_id] = db_base; qp->sq.qid = q_id; err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id], sq_msix_entry, CI_ADDR(func_to_io->ci_addr_base, q_id), CI_ADDR(func_to_io->ci_dma_base, q_id), db_base); if (err) { dev_err(&pdev->dev, "Failed to init SQ\n"); goto err_sq_init; } qp->rq.qid = q_id; err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id], rq_msix_entry); if (err) { dev_err(&pdev->dev, "Failed to init RQ\n"); goto err_rq_init; } return 0; err_rq_init: hinic_clean_sq(&qp->sq); err_sq_init: return_db_area(func_to_io, db_base); err_get_db: hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); err_rq_alloc: hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); return err; } /** * destroy_qp - Clean the resources of a Queue Pair * @func_to_io: func to io channel that holds the IO components * @qp: pointer to the qp to clean **/ static void destroy_qp(struct hinic_func_to_io *func_to_io, struct hinic_qp *qp) { int q_id = qp->q_id; hinic_clean_rq(&qp->rq); hinic_clean_sq(&qp->sq); return_db_area(func_to_io, func_to_io->sq_db[q_id]); hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); } /** * hinic_io_create_qps - Create Queue Pairs * @func_to_io: func to io channel that holds the IO components * @base_qpn: base qp number * @num_qps: number queue pairs to create * @sq_msix_entries: msix entries for sq * @rq_msix_entries: msix entries for rq * * Return 0 - Success, negative - Failure **/ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, u16 base_qpn, int num_qps, struct msix_entry *sq_msix_entries, struct msix_entry *rq_msix_entries) { struct hinic_hwif *hwif = func_to_io->hwif; struct pci_dev *pdev = hwif->pdev; void *ci_addr_base; int i, j, err; func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps, sizeof(*func_to_io->qps), GFP_KERNEL); if (!func_to_io->qps) return -ENOMEM; func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps, sizeof(*func_to_io->sq_wq), GFP_KERNEL); if (!func_to_io->sq_wq) { err = -ENOMEM; goto err_sq_wq; } func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps, sizeof(*func_to_io->rq_wq), GFP_KERNEL); if (!func_to_io->rq_wq) { err = -ENOMEM; goto err_rq_wq; } func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps, sizeof(*func_to_io->sq_db), GFP_KERNEL); if (!func_to_io->sq_db) { err = -ENOMEM; goto err_sq_db; } ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), &func_to_io->ci_dma_base, GFP_KERNEL); if (!ci_addr_base) { dev_err(&pdev->dev, "Failed to allocate CI area\n"); err = -ENOMEM; goto err_ci_base; } func_to_io->ci_addr_base = ci_addr_base; for (i = 0; i < num_qps; i++) { err = init_qp(func_to_io, &func_to_io->qps[i], i, &sq_msix_entries[i], &rq_msix_entries[i]); if (err) { dev_err(&pdev->dev, "Failed to create QP %d\n", i); goto err_init_qp; } } err = write_qp_ctxts(func_to_io, base_qpn, num_qps); if (err) { dev_err(&pdev->dev, "Failed to init QP ctxts\n"); goto err_write_qp_ctxts; } err = hinic_clean_qp_offload_ctxt(func_to_io); if (err) { dev_err(&pdev->dev, "Failed to clean QP contexts space\n"); goto err_write_qp_ctxts; } return 0; err_write_qp_ctxts: err_init_qp: for (j = 0; j < i; j++) destroy_qp(func_to_io, 
&func_to_io->qps[j]); dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), func_to_io->ci_addr_base, func_to_io->ci_dma_base); err_ci_base: devm_kfree(&pdev->dev, func_to_io->sq_db); err_sq_db: devm_kfree(&pdev->dev, func_to_io->rq_wq); err_rq_wq: devm_kfree(&pdev->dev, func_to_io->sq_wq); err_sq_wq: devm_kfree(&pdev->dev, func_to_io->qps); return err; } /** * hinic_io_destroy_qps - Destroy the IO Queue Pairs * @func_to_io: func to io channel that holds the IO components * @num_qps: number queue pairs to destroy **/ void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps) { struct hinic_hwif *hwif = func_to_io->hwif; struct pci_dev *pdev = hwif->pdev; size_t ci_table_size; int i; ci_table_size = CI_TABLE_SIZE(num_qps); for (i = 0; i < num_qps; i++) destroy_qp(func_to_io, &func_to_io->qps[i]); dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base, func_to_io->ci_dma_base); devm_kfree(&pdev->dev, func_to_io->sq_db); devm_kfree(&pdev->dev, func_to_io->rq_wq); devm_kfree(&pdev->dev, func_to_io->sq_wq); devm_kfree(&pdev->dev, func_to_io->qps); } int hinic_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx, u32 page_size) { struct hinic_wq_page_size page_size_info = {0}; u16 out_size = sizeof(page_size_info); struct hinic_pfhwdev *pfhwdev; int err; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); page_size_info.func_idx = func_idx; page_size_info.ppf_idx = HINIC_HWIF_PPF_IDX(hwdev->hwif); page_size_info.page_size = HINIC_PAGE_SIZE_HW(page_size); err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_PAGESIZE_SET, &page_size_info, sizeof(page_size_info), &page_size_info, &out_size, HINIC_MGMT_MSG_SYNC); if (err || !out_size || page_size_info.status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n", err, page_size_info.status, out_size); return -EFAULT; } return 0; } /** * hinic_io_init - Initialize the IO components * @func_to_io: func to io channel that holds the IO components * @hwif: HW interface for accessing IO * @max_qps: maximum QPs in HW * @num_ceqs: number completion event queues * @ceq_msix_entries: msix entries for ceqs * * Return 0 - Success, negative - Failure **/ int hinic_io_init(struct hinic_func_to_io *func_to_io, struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, struct msix_entry *ceq_msix_entries) { struct pci_dev *pdev = hwif->pdev; enum hinic_cmdq_type cmdq, type; void __iomem *db_area; int err; func_to_io->hwif = hwif; func_to_io->qps = NULL; func_to_io->max_qps = max_qps; func_to_io->ceqs.hwdev = func_to_io->hwdev; err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs, HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE, ceq_msix_entries); if (err) { dev_err(&pdev->dev, "Failed to init CEQs\n"); return err; } err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif); if (err) { dev_err(&pdev->dev, "Failed to allocate WQS for IO\n"); goto err_wqs_alloc; } func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR); if (!func_to_io->db_base) { dev_err(&pdev->dev, "Failed to remap IO DB area\n"); err = -ENOMEM; goto err_db_ioremap; } init_db_area_idx(&func_to_io->free_db_area); for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) { db_area = get_db_area(func_to_io); if (IS_ERR(db_area)) { dev_err(&pdev->dev, "Failed to get cmdq db area\n"); err = PTR_ERR(db_area); goto err_db_area; } func_to_io->cmdq_db_area[cmdq] = db_area; } err = hinic_set_wq_page_size(func_to_io->hwdev, HINIC_HWIF_FUNC_IDX(hwif), HINIC_DEFAULT_WQ_PAGE_SIZE); if (err) 
{ dev_err(&func_to_io->hwif->pdev->dev, "Failed to set wq page size\n"); goto init_wq_pg_size_err; } err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif, func_to_io->cmdq_db_area); if (err) { dev_err(&pdev->dev, "Failed to initialize cmdqs\n"); goto err_init_cmdqs; } return 0; err_init_cmdqs: if (!HINIC_IS_VF(func_to_io->hwif)) hinic_set_wq_page_size(func_to_io->hwdev, HINIC_HWIF_FUNC_IDX(hwif), HINIC_HW_WQ_PAGE_SIZE); init_wq_pg_size_err: err_db_area: for (type = HINIC_CMDQ_SYNC; type < cmdq; type++) return_db_area(func_to_io, func_to_io->cmdq_db_area[type]); iounmap(func_to_io->db_base); err_db_ioremap: hinic_wqs_free(&func_to_io->wqs); err_wqs_alloc: hinic_ceqs_free(&func_to_io->ceqs); return err; } /** * hinic_io_free - Free the IO components * @func_to_io: func to io channel that holds the IO components **/ void hinic_io_free(struct hinic_func_to_io *func_to_io) { enum hinic_cmdq_type cmdq; hinic_free_cmdqs(&func_to_io->cmdqs); if (!HINIC_IS_VF(func_to_io->hwif)) hinic_set_wq_page_size(func_to_io->hwdev, HINIC_HWIF_FUNC_IDX(func_to_io->hwif), HINIC_HW_WQ_PAGE_SIZE); for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]); iounmap(func_to_io->db_base); hinic_wqs_free(&func_to_io->wqs); hinic_ceqs_free(&func_to_io->ceqs); }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
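hinic_hw_io.c layers the per-function IO resources: hinic_io_init() brings up the completion event queues, the work-queue set, the doorbell BAR mapping and the command queues, and hinic_io_create_qps() then allocates the queue pairs and pushes their SQ/RQ contexts to hardware. The sketch below is a hypothetical caller (example_io_bring_up() and all of its parameters are assumptions, not taken from the tree) showing the ordering that the file's error-unwind paths imply; it assumes func_to_io->hwdev has already been set by the caller.

#include <linux/pci.h>
#include "hinic_hw_if.h"
#include "hinic_hw_io.h"

/* Hypothetical bring-up sketch mirroring the unwind order of the code above. */
static int example_io_bring_up(struct hinic_func_to_io *func_to_io,
                               struct hinic_hwif *hwif, u16 max_qps,
                               int num_ceqs, struct msix_entry *ceq_entries,
                               struct msix_entry *sq_entries,
                               struct msix_entry *rq_entries, u16 base_qpn)
{
        int err;

        /* CEQs, WQ set, doorbell BAR mapping and command queues */
        err = hinic_io_init(func_to_io, hwif, max_qps, num_ceqs, ceq_entries);
        if (err)
                return err;

        /* allocates the QPs and writes their SQ/RQ contexts to hardware */
        err = hinic_io_create_qps(func_to_io, base_qpn, max_qps,
                                  sq_entries, rq_entries);
        if (err)
                goto err_create_qps;

        /* teardown later is the mirror image: hinic_io_destroy_qps(), then hinic_io_free() */
        return 0;

err_create_qps:
        hinic_io_free(func_to_io);
        return err;
}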
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/vmalloc.h> #include <linux/errno.h> #include <linux/sizes.h> #include <linux/atomic.h> #include <linux/skbuff.h> #include <linux/io.h> #include <asm/barrier.h> #include <asm/byteorder.h> #include "hinic_common.h" #include "hinic_hw_if.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_qp_ctxt.h" #include "hinic_hw_qp.h" #include "hinic_hw_io.h" #define SQ_DB_OFF SZ_2K /* The number of cache line to prefetch Until threshold state */ #define WQ_PREFETCH_MAX 2 /* The number of cache line to prefetch After threshold state */ #define WQ_PREFETCH_MIN 1 /* Threshold state */ #define WQ_PREFETCH_THRESHOLD 256 /* sizes of the SQ/RQ ctxt */ #define Q_CTXT_SIZE 48 #define CTXT_RSVD 240 #define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE) #define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ (((max_rqs) + (max_sqs)) * CTXT_RSVD + \ (max_sqs + (q_id)) * Q_CTXT_SIZE) #define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4) #define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3) #define SECT_SIZE_FROM_8BYTES(size) ((size) << 3) #define SQ_DB_PI_HI_SHIFT 8 #define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT) #define SQ_DB_PI_LOW_MASK 0xFF #define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK) #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask) enum sq_wqe_type { SQ_NORMAL_WQE = 0, }; enum rq_completion_fmt { RQ_COMPLETE_SGE = 1 }; void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, enum hinic_qp_ctxt_type ctxt_type, u16 num_queues, u16 max_queues) { u16 max_sqs = max_queues; u16 max_rqs = max_queues; qp_ctxt_hdr->num_queues = num_queues; qp_ctxt_hdr->queue_type = ctxt_type; if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ) qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0); else qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0); qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset); hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); } void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, struct hinic_sq *sq, u16 global_qid) { u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; u64 wq_page_addr, wq_page_pfn, wq_block_pfn; u16 pi_start, ci_start; struct hinic_wq *wq; wq = sq->wq; ci_start = atomic_read(&wq->cons_idx); pi_start = atomic_read(&wq->prod_idx); /* Read the first page paddr from the WQ page paddr ptrs */ wq_page_addr = be64_to_cpu(*wq->block_vaddr); wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); wq_page_pfn_hi = upper_32_bits(wq_page_pfn); wq_page_pfn_lo = lower_32_bits(wq_page_pfn); /* If only one page, use 0-level CLA */ if (wq->num_q_pages == 1) wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq_page_addr); else wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); wq_block_pfn_hi = upper_32_bits(wq_block_pfn); wq_block_pfn_lo = lower_32_bits(wq_block_pfn); sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid, GLOBAL_SQ_ID) | HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN); sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) | HINIC_SQ_CTXT_CI_SET(1, WRAPPED); sq_ctxt->wq_hi_pfn_pi = HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | 
HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI); sq_ctxt->wq_lo_pfn = wq_page_pfn_lo; sq_ctxt->pref_cache = HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); sq_ctxt->pref_wrapped = 1; sq_ctxt->pref_wq_hi_pfn_ci = HINIC_SQ_CTXT_PREF_SET(ci_start, CI) | HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN); sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; sq_ctxt->wq_block_hi_pfn = HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); } void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, struct hinic_rq *rq, u16 global_qid) { u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; u64 wq_page_addr, wq_page_pfn, wq_block_pfn; u16 pi_start, ci_start; struct hinic_wq *wq; wq = rq->wq; ci_start = atomic_read(&wq->cons_idx); pi_start = atomic_read(&wq->prod_idx); /* Read the first page paddr from the WQ page paddr ptrs */ wq_page_addr = be64_to_cpu(*wq->block_vaddr); wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); wq_page_pfn_hi = upper_32_bits(wq_page_pfn); wq_page_pfn_lo = lower_32_bits(wq_page_pfn); wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); wq_block_pfn_hi = upper_32_bits(wq_block_pfn); wq_block_pfn_lo = lower_32_bits(wq_block_pfn); rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) | HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED); rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) | HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR); rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI); rq_ctxt->wq_lo_pfn = wq_page_pfn_lo; rq_ctxt->pref_cache = HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); rq_ctxt->pref_wrapped = 1; rq_ctxt->pref_wq_hi_pfn_ci = HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) | HINIC_RQ_CTXT_PREF_SET(ci_start, CI); rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr); rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr); rq_ctxt->wq_block_hi_pfn = HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); } /** * alloc_sq_skb_arr - allocate sq array for saved skb * @sq: HW Send Queue * * Return 0 - Success, negative - Failure **/ static int alloc_sq_skb_arr(struct hinic_sq *sq) { struct hinic_wq *wq = sq->wq; size_t skb_arr_size; skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); sq->saved_skb = vzalloc(skb_arr_size); if (!sq->saved_skb) return -ENOMEM; return 0; } /** * free_sq_skb_arr - free sq array for saved skb * @sq: HW Send Queue **/ static void free_sq_skb_arr(struct hinic_sq *sq) { vfree(sq->saved_skb); } /** * alloc_rq_skb_arr - allocate rq array for saved skb * @rq: HW Receive Queue * * Return 0 - Success, negative - Failure **/ static int alloc_rq_skb_arr(struct hinic_rq *rq) { struct hinic_wq *wq = rq->wq; size_t skb_arr_size; skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); rq->saved_skb = vzalloc(skb_arr_size); if (!rq->saved_skb) return -ENOMEM; return 0; } /** * free_rq_skb_arr - free rq array for saved skb * @rq: HW Receive Queue **/ static void free_rq_skb_arr(struct hinic_rq *rq) { vfree(rq->saved_skb); } /** * hinic_init_sq - Initialize HW Send Queue * @sq: HW Send Queue * @hwif: HW Interface for accessing HW * @wq: Work Queue 
for the data of the SQ * @entry: msix entry for sq * @ci_addr: address for reading the current HW consumer index * @ci_dma_addr: dma address for reading the current HW consumer index * @db_base: doorbell base address * * Return 0 - Success, negative - Failure **/ int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr, dma_addr_t ci_dma_addr, void __iomem *db_base) { sq->hwif = hwif; sq->wq = wq; sq->irq = entry->vector; sq->msix_entry = entry->entry; sq->hw_ci_addr = ci_addr; sq->hw_ci_dma_addr = ci_dma_addr; sq->db_base = db_base + SQ_DB_OFF; return alloc_sq_skb_arr(sq); } /** * hinic_clean_sq - Clean HW Send Queue's Resources * @sq: Send Queue **/ void hinic_clean_sq(struct hinic_sq *sq) { free_sq_skb_arr(sq); } /** * alloc_rq_cqe - allocate rq completion queue elements * @rq: HW Receive Queue * * Return 0 - Success, negative - Failure **/ static int alloc_rq_cqe(struct hinic_rq *rq) { struct hinic_hwif *hwif = rq->hwif; struct pci_dev *pdev = hwif->pdev; size_t cqe_dma_size, cqe_size; struct hinic_wq *wq = rq->wq; int j, i; cqe_size = wq->q_depth * sizeof(*rq->cqe); rq->cqe = vzalloc(cqe_size); if (!rq->cqe) return -ENOMEM; cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); rq->cqe_dma = vzalloc(cqe_dma_size); if (!rq->cqe_dma) goto err_cqe_dma_arr_alloc; for (i = 0; i < wq->q_depth; i++) { rq->cqe[i] = dma_alloc_coherent(&pdev->dev, sizeof(*rq->cqe[i]), &rq->cqe_dma[i], GFP_KERNEL); if (!rq->cqe[i]) goto err_cqe_alloc; } return 0; err_cqe_alloc: for (j = 0; j < i; j++) dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], rq->cqe_dma[j]); vfree(rq->cqe_dma); err_cqe_dma_arr_alloc: vfree(rq->cqe); return -ENOMEM; } /** * free_rq_cqe - free rq completion queue elements * @rq: HW Receive Queue **/ static void free_rq_cqe(struct hinic_rq *rq) { struct hinic_hwif *hwif = rq->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_wq *wq = rq->wq; int i; for (i = 0; i < wq->q_depth; i++) dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], rq->cqe_dma[i]); vfree(rq->cqe_dma); vfree(rq->cqe); } /** * hinic_init_rq - Initialize HW Receive Queue * @rq: HW Receive Queue * @hwif: HW Interface for accessing HW * @wq: Work Queue for the data of the RQ * @entry: msix entry for rq * * Return 0 - Success, negative - Failure **/ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry) { struct pci_dev *pdev = hwif->pdev; size_t pi_size; int err; rq->hwif = hwif; rq->wq = wq; rq->irq = entry->vector; rq->msix_entry = entry->entry; rq->buf_sz = HINIC_RX_BUF_SZ; err = alloc_rq_skb_arr(rq); if (err) { dev_err(&pdev->dev, "Failed to allocate rq priv data\n"); return err; } err = alloc_rq_cqe(rq); if (err) { dev_err(&pdev->dev, "Failed to allocate rq cqe\n"); goto err_alloc_rq_cqe; } /* HW requirements: Must be at least 32 bit */ pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size, &rq->pi_dma_addr, GFP_KERNEL); if (!rq->pi_virt_addr) { err = -ENOMEM; goto err_pi_virt; } return 0; err_pi_virt: free_rq_cqe(rq); err_alloc_rq_cqe: free_rq_skb_arr(rq); return err; } /** * hinic_clean_rq - Clean HW Receive Queue's Resources * @rq: HW Receive Queue **/ void hinic_clean_rq(struct hinic_rq *rq) { struct hinic_hwif *hwif = rq->hwif; struct pci_dev *pdev = hwif->pdev; size_t pi_size; pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr, rq->pi_dma_addr); 
free_rq_cqe(rq); free_rq_skb_arr(rq); } /** * hinic_get_sq_free_wqebbs - return number of free wqebbs for use * @sq: send queue * * Return number of free wqebbs **/ int hinic_get_sq_free_wqebbs(struct hinic_sq *sq) { struct hinic_wq *wq = sq->wq; return atomic_read(&wq->delta) - 1; } /** * hinic_get_rq_free_wqebbs - return number of free wqebbs for use * @rq: recv queue * * Return number of free wqebbs **/ int hinic_get_rq_free_wqebbs(struct hinic_rq *rq) { struct hinic_wq *wq = rq->wq; return atomic_read(&wq->delta) - 1; } static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, int nr_descs) { u32 ctrl_size, task_size, bufdesc_size; ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl)); task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task)); bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc); bufdesc_size = SIZE_8BYTES(bufdesc_size); ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) | HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) | HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | HINIC_SQ_CTRL_SET(ctrl_size, LEN); ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT, QUEUE_INFO_MSS) | HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC); } static void sq_prepare_task(struct hinic_sq_task *task) { task->pkt_info0 = 0; task->pkt_info1 = 0; task->pkt_info2 = 0; task->ufo_v6_identify = 0; task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE); task->zero_pad = 0; } void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len) { task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN); } void hinic_task_set_outter_l3(struct hinic_sq_task *task, enum hinic_l3_offload_type l3_type, u32 network_len) { task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) | HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN); } void hinic_task_set_inner_l3(struct hinic_sq_task *task, enum hinic_l3_offload_type l3_type, u32 network_len) { task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE); task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN); } void hinic_task_set_tunnel_l4(struct hinic_sq_task *task, enum hinic_l4_tunnel_type l4_type, u32 tunnel_len) { task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) | HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN); } void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info, enum hinic_l4_offload_type l4_offload, u32 l4_len, u32 offset) { u32 tcp_udp_cs = 0, sctp = 0; u32 mss = HINIC_MSS_DEFAULT; if (l4_offload == TCP_OFFLOAD_ENABLE || l4_offload == UDP_OFFLOAD_ENABLE) tcp_udp_cs = 1; else if (l4_offload == SCTP_OFFLOAD_ENABLE) sctp = 1; task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD); task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) | HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) | HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP); *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS); *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS); } void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info, enum hinic_l4_offload_type l4_offload, u32 l4_len, u32 offset, u32 ip_ident, u32 mss) { u32 tso = 0, ufo = 0; if (l4_offload == TCP_OFFLOAD_ENABLE) tso = 1; else if (l4_offload == UDP_OFFLOAD_ENABLE) ufo = 1; task->ufo_v6_identify = ip_ident; task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD); task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG); task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN); *queue_info |= 
HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) | HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) | HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) | HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS); /* set MSS value */ *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS); *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS); } /** * hinic_sq_prepare_wqe - prepare wqe before insert to the queue * @sq: send queue * @sq_wqe: wqe to prepare * @sges: sges for use by the wqe for send for buf addresses * @nr_sges: number of sges **/ void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, int nr_sges) { int i; sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges); sq_prepare_task(&sq_wqe->task); for (i = 0; i < nr_sges; i++) sq_wqe->buf_descs[i].sge = sges[i]; } /** * sq_prepare_db - prepare doorbell to write * @sq: send queue * @prod_idx: pi value for the doorbell * @cos: cos of the doorbell * * Return db value **/ static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos) { struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx)); /* Data should be written to HW in Big Endian Format */ return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) | HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) | HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) | HINIC_SQ_DB_INFO_SET(cos, COS) | HINIC_SQ_DB_INFO_SET(qp->q_id, QID)); } /** * hinic_sq_write_db- write doorbell * @sq: send queue * @prod_idx: pi value for the doorbell * @wqe_size: wqe size * @cos: cos of the wqe **/ void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, unsigned int cos) { struct hinic_wq *wq = sq->wq; /* increment prod_idx to the next */ prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; prod_idx = SQ_MASKED_IDX(sq, prod_idx); wmb(); /* Write all before the doorbell */ writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx)); } /** * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi * @sq: sq to get wqe from * @wqe_size: wqe size * @prod_idx: returned pi * * Return wqe pointer **/ struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, unsigned int wqe_size, u16 *prod_idx) { struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size, prod_idx); if (IS_ERR(hw_wqe)) return NULL; return &hw_wqe->sq_wqe; } /** * hinic_sq_return_wqe - return the wqe to the sq * @sq: send queue * @wqe_size: the size of the wqe **/ void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size) { hinic_return_wqe(sq->wq, wqe_size); } /** * hinic_sq_write_wqe - write the wqe to the sq * @sq: send queue * @prod_idx: pi of the wqe * @sq_wqe: the wqe to write * @skb: skb to save * @wqe_size: the size of the wqe **/ void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, struct hinic_sq_wqe *sq_wqe, struct sk_buff *skb, unsigned int wqe_size) { struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe; sq->saved_skb[prod_idx] = skb; /* The data in the HW should be in Big Endian Format */ hinic_cpu_to_be32(sq_wqe, wqe_size); hinic_write_wqe(sq->wq, hw_wqe, wqe_size); } /** * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci, the * wqe only have one wqebb * @sq: send queue * @skb: return skb that was saved * @wqe_size: the wqe size ptr * @cons_idx: consumer index of the wqe * * Return wqe in ci position **/ struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq, struct sk_buff **skb, unsigned int *wqe_size, u16 *cons_idx) { struct hinic_hw_wqe *hw_wqe; struct 
hinic_sq_wqe *sq_wqe; struct hinic_sq_ctrl *ctrl; unsigned int buf_sect_len; u32 ctrl_info; /* read the ctrl section for getting wqe size */ hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx); if (IS_ERR(hw_wqe)) return NULL; *skb = sq->saved_skb[*cons_idx]; sq_wqe = &hw_wqe->sq_wqe; ctrl = &sq_wqe->ctrl; ctrl_info = be32_to_cpu(ctrl->ctrl_info); buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN); *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task); *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len); *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size); return &hw_wqe->sq_wqe; } /** * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci * @sq: send queue * @skb: return skb that was saved * @wqe_size: the size of the wqe * @cons_idx: consumer index of the wqe * * Return wqe in ci position **/ struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, struct sk_buff **skb, unsigned int wqe_size, u16 *cons_idx) { struct hinic_hw_wqe *hw_wqe; hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx); *skb = sq->saved_skb[*cons_idx]; return &hw_wqe->sq_wqe; } /** * hinic_sq_put_wqe - release the ci for new wqes * @sq: send queue * @wqe_size: the size of the wqe **/ void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size) { hinic_put_wqe(sq->wq, wqe_size); } /** * hinic_sq_get_sges - get sges from the wqe * @sq_wqe: wqe to get the sges from its buffer addresses * @sges: returned sges * @nr_sges: number sges to return **/ void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, int nr_sges) { int i; for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) { sges[i] = sq_wqe->buf_descs[i].sge; hinic_be32_to_cpu(&sges[i], sizeof(sges[i])); } } /** * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi * @rq: rq to get wqe from * @wqe_size: wqe size * @prod_idx: returned pi * * Return wqe pointer **/ struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, unsigned int wqe_size, u16 *prod_idx) { struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size, prod_idx); if (IS_ERR(hw_wqe)) return NULL; return &hw_wqe->rq_wqe; } /** * hinic_rq_write_wqe - write the wqe to the rq * @rq: recv queue * @prod_idx: pi of the wqe * @rq_wqe: the wqe to write * @skb: skb to save **/ void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb) { struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe; rq->saved_skb[prod_idx] = skb; /* The data in the HW should be in Big Endian Format */ hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe)); hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe)); } /** * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci * @rq: recv queue * @wqe_size: the size of the wqe * @skb: return saved skb * @cons_idx: consumer index of the wqe * * Return wqe in ci position **/ struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq, unsigned int wqe_size, struct sk_buff **skb, u16 *cons_idx) { struct hinic_hw_wqe *hw_wqe; struct hinic_rq_cqe *cqe; int rx_done; u32 status; hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx); if (IS_ERR(hw_wqe)) return NULL; cqe = rq->cqe[*cons_idx]; status = be32_to_cpu(cqe->status); rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE); if (!rx_done) return NULL; *skb = rq->saved_skb[*cons_idx]; return &hw_wqe->rq_wqe; } /** * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position * @rq: recv queue * @wqe_size: the size of the wqe * @skb: return saved skb * @cons_idx: consumer index in the wq * * Return wqe in incremented ci 
position **/ struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, unsigned int wqe_size, struct sk_buff **skb, u16 *cons_idx) { struct hinic_wq *wq = rq->wq; struct hinic_hw_wqe *hw_wqe; unsigned int num_wqebbs; wqe_size = ALIGN(wqe_size, wq->wqebb_size); num_wqebbs = wqe_size / wq->wqebb_size; *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs); *skb = rq->saved_skb[*cons_idx]; hw_wqe = hinic_read_wqe_direct(wq, *cons_idx); return &hw_wqe->rq_wqe; } /** * hinic_rq_put_wqe - release the ci for new wqes * @rq: recv queue * @cons_idx: consumer index of the wqe * @wqe_size: the size of the wqe **/ void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, unsigned int wqe_size) { struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; u32 status = be32_to_cpu(cqe->status); status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE); /* Rx WQE size is 1 WQEBB, no wq shadow*/ cqe->status = cpu_to_be32(status); wmb(); /* clear done flag */ hinic_put_wqe(rq->wq, wqe_size); } /** * hinic_rq_get_sge - get sge from the wqe * @rq: recv queue * @rq_wqe: wqe to get the sge from its buf address * @cons_idx: consumer index * @sge: returned sge **/ void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe, u16 cons_idx, struct hinic_sge *sge) { struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; u32 len = be32_to_cpu(cqe->len); sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr); sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr); sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN); } /** * hinic_rq_prepare_wqe - prepare wqe before insert to the queue * @rq: recv queue * @prod_idx: pi value * @rq_wqe: the wqe * @sge: sge for use by the wqe for recv buf address **/ void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge) { struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect; struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc; struct hinic_rq_cqe *cqe = rq->cqe[prod_idx]; struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl; dma_addr_t cqe_dma = rq->cqe_dma[prod_idx]; ctrl->ctrl_info = HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) | HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) | HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) | HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT); hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe)); buf_desc->hi_addr = sge->hi_addr; buf_desc->lo_addr = sge->lo_addr; } /** * hinic_rq_update - update pi of the rq * @rq: recv queue * @prod_idx: pi value **/ void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx) { *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1)); }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
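The SQ helpers in hinic_hw_qp.c are meant to be called in a fixed order on the transmit path: reserve a WQE at the producer index, fill its control/task sections and SGEs, write it (which also stashes the skb and converts the WQE to big endian), then ring the doorbell. The fragment below is a hypothetical sketch of that ordering, not the driver's actual transmit routine (that lives in hinic_tx.c); wqe_size, sges and cos are assumed to have been prepared by the caller.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include "hinic_hw_qp.h"

/* Hypothetical transmit-path sketch; not the driver's real xmit function. */
static int example_sq_xmit(struct hinic_sq *sq, struct sk_buff *skb,
                           struct hinic_sge *sges, int nr_sges,
                           unsigned int wqe_size, unsigned int cos)
{
        struct hinic_sq_wqe *wqe;
        u16 pi;

        wqe = hinic_sq_get_wqe(sq, wqe_size, &pi);
        if (!wqe)
                return -EBUSY;                          /* ring full; caller would stop the txq */

        hinic_sq_prepare_wqe(sq, wqe, sges, nr_sges);   /* ctrl + task sections, buffer SGEs */
        hinic_sq_write_wqe(sq, pi, wqe, skb, wqe_size); /* saves skb, swaps WQE to big endian */
        hinic_sq_write_db(sq, pi, wqe_size, cos);       /* ring the doorbell */

        return 0;
}

On failure after hinic_sq_get_wqe() but before the doorbell, hinic_sq_return_wqe() gives the reserved WQEBBs back, matching the error handling the file provides.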
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/atomic.h> #include <linux/semaphore.h> #include <linux/errno.h> #include <linux/vmalloc.h> #include <linux/err.h> #include <asm/byteorder.h> #include "hinic_hw_if.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_cmdq.h" #define WQS_BLOCKS_PER_PAGE 4 #define WQ_BLOCK_SIZE 4096 #define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) #define WQS_MAX_NUM_BLOCKS 128 #define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ sizeof((wqs)->free_blocks[0])) #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size) #define WQ_PAGE_ADDR_SIZE sizeof(u64) #define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) #define CMDQ_BLOCK_SIZE 512 #define CMDQ_PAGE_SIZE 4096 #define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) #define WQ_BASE_VADDR(wqs, wq) \ ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \ + (wq)->block_idx * WQ_BLOCK_SIZE) #define WQ_BASE_PADDR(wqs, wq) \ ((wqs)->page_paddr[(wq)->page_idx] \ + (wq)->block_idx * WQ_BLOCK_SIZE) #define WQ_BASE_ADDR(wqs, wq) \ ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \ + (wq)->block_idx * WQ_BLOCK_SIZE) #define CMDQ_BASE_VADDR(cmdq_pages, wq) \ ((void *)((cmdq_pages)->page_vaddr) \ + (wq)->block_idx * CMDQ_BLOCK_SIZE) #define CMDQ_BASE_PADDR(cmdq_pages, wq) \ ((cmdq_pages)->page_paddr \ + (wq)->block_idx * CMDQ_BLOCK_SIZE) #define CMDQ_BASE_ADDR(cmdq_pages, wq) \ ((void *)((cmdq_pages)->shadow_page_vaddr) \ + (wq)->block_idx * CMDQ_BLOCK_SIZE) #define WQ_PAGE_ADDR(wq, idx) \ ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)]) #define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) #define WQE_IN_RANGE(wqe, start, end) \ (((unsigned long)(wqe) >= (unsigned long)(start)) && \ ((unsigned long)(wqe) < (unsigned long)(end))) #define WQE_SHADOW_PAGE(wq, wqe) \ (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \ / (wq)->max_wqe_size) static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx) { return (((idx) & ((wq)->num_wqebbs_per_page - 1)) << (wq)->wqebb_size_shift); } static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx) { return (((idx) >> ((wq)->wqebbs_per_page_shift)) & ((wq)->num_q_pages - 1)); } /** * queue_alloc_page - allocate page for Queue * @hwif: HW interface for allocating DMA * @vaddr: virtual address will be returned in this address * @paddr: physical address will be returned in this address * @shadow_vaddr: VM area will be return here for holding WQ page addresses * @page_sz: page size of each WQ page * * Return 0 - Success, negative - Failure **/ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, void ***shadow_vaddr, size_t page_sz) { struct pci_dev *pdev = hwif->pdev; dma_addr_t dma_addr; *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr, GFP_KERNEL); if (!*vaddr) { dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); return -ENOMEM; } *paddr = (u64)dma_addr; /* use vzalloc for big mem */ *shadow_vaddr = vzalloc(page_sz); if (!*shadow_vaddr) goto err_shadow_vaddr; return 0; err_shadow_vaddr: dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr); return -ENOMEM; } /** * wqs_allocate_page - allocate page for WQ set * @wqs: Work Queue Set * @page_idx: the page index of the page will be allocated * * Return 0 - Success, negative - Failure **/ 
static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx) { return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx], &wqs->page_paddr[page_idx], &wqs->shadow_page_vaddr[page_idx], WQS_PAGE_SIZE); } /** * wqs_free_page - free page of WQ set * @wqs: Work Queue Set * @page_idx: the page index of the page will be freed **/ static void wqs_free_page(struct hinic_wqs *wqs, int page_idx) { struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev; dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE, wqs->page_vaddr[page_idx], (dma_addr_t)wqs->page_paddr[page_idx]); vfree(wqs->shadow_page_vaddr[page_idx]); } /** * cmdq_allocate_page - allocate page for cmdq * @cmdq_pages: the pages of the cmdq queue struct to hold the page * * Return 0 - Success, negative - Failure **/ static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages) { return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr, &cmdq_pages->page_paddr, &cmdq_pages->shadow_page_vaddr, CMDQ_PAGE_SIZE); } /** * cmdq_free_page - free page from cmdq * @cmdq_pages: the pages of the cmdq queue struct that hold the page **/ static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages) { struct hinic_hwif *hwif = cmdq_pages->hwif; struct pci_dev *pdev = hwif->pdev; dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE, cmdq_pages->page_vaddr, (dma_addr_t)cmdq_pages->page_paddr); vfree(cmdq_pages->shadow_page_vaddr); } static int alloc_page_arrays(struct hinic_wqs *wqs) { struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev; wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages, sizeof(*wqs->page_paddr), GFP_KERNEL); if (!wqs->page_paddr) return -ENOMEM; wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages, sizeof(*wqs->page_vaddr), GFP_KERNEL); if (!wqs->page_vaddr) goto err_page_vaddr; wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages, sizeof(*wqs->shadow_page_vaddr), GFP_KERNEL); if (!wqs->shadow_page_vaddr) goto err_page_shadow_vaddr; return 0; err_page_shadow_vaddr: devm_kfree(&pdev->dev, wqs->page_vaddr); err_page_vaddr: devm_kfree(&pdev->dev, wqs->page_paddr); return -ENOMEM; } static void free_page_arrays(struct hinic_wqs *wqs) { struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev; devm_kfree(&pdev->dev, wqs->shadow_page_vaddr); devm_kfree(&pdev->dev, wqs->page_vaddr); devm_kfree(&pdev->dev, wqs->page_paddr); } static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx, int *block_idx) { int pos; down(&wqs->alloc_blocks_lock); wqs->num_free_blks--; if (wqs->num_free_blks < 0) { wqs->num_free_blks++; up(&wqs->alloc_blocks_lock); return -ENOMEM; } pos = wqs->alloc_blk_pos++; pos &= WQS_MAX_NUM_BLOCKS - 1; *page_idx = wqs->free_blocks[pos].page_idx; *block_idx = wqs->free_blocks[pos].block_idx; wqs->free_blocks[pos].page_idx = -1; wqs->free_blocks[pos].block_idx = -1; up(&wqs->alloc_blocks_lock); return 0; } static void wqs_return_block(struct hinic_wqs *wqs, int page_idx, int block_idx) { int pos; down(&wqs->alloc_blocks_lock); pos = wqs->return_blk_pos++; pos &= WQS_MAX_NUM_BLOCKS - 1; wqs->free_blocks[pos].page_idx = page_idx; wqs->free_blocks[pos].block_idx = block_idx; wqs->num_free_blks++; up(&wqs->alloc_blocks_lock); } static void init_wqs_blocks_arr(struct hinic_wqs *wqs) { int page_idx, blk_idx, pos = 0; for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) { wqs->free_blocks[pos].page_idx = page_idx; wqs->free_blocks[pos].block_idx = blk_idx; pos++; } } wqs->alloc_blk_pos = 0; 
wqs->return_blk_pos = pos; wqs->num_free_blks = pos; sema_init(&wqs->alloc_blocks_lock, 1); } /** * hinic_wqs_alloc - allocate Work Queues set * @wqs: Work Queue Set * @max_wqs: maximum wqs to allocate * @hwif: HW interface for use for the allocation * * Return 0 - Success, negative - Failure **/ int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs, struct hinic_hwif *hwif) { struct pci_dev *pdev = hwif->pdev; int err, i, page_idx; max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE); if (max_wqs > WQS_MAX_NUM_BLOCKS) { dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs); return -EINVAL; } wqs->hwif = hwif; wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE; if (alloc_page_arrays(wqs)) { dev_err(&pdev->dev, "Failed to allocate mem for page addresses\n"); return -ENOMEM; } for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { err = wqs_allocate_page(wqs, page_idx); if (err) { dev_err(&pdev->dev, "Failed wq page allocation\n"); goto err_wq_allocate_page; } } wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs), GFP_KERNEL); if (!wqs->free_blocks) { err = -ENOMEM; goto err_alloc_blocks; } init_wqs_blocks_arr(wqs); return 0; err_alloc_blocks: err_wq_allocate_page: for (i = 0; i < page_idx; i++) wqs_free_page(wqs, i); free_page_arrays(wqs); return err; } /** * hinic_wqs_free - free Work Queues set * @wqs: Work Queue Set **/ void hinic_wqs_free(struct hinic_wqs *wqs) { struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev; int page_idx; devm_kfree(&pdev->dev, wqs->free_blocks); for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) wqs_free_page(wqs, page_idx); free_page_arrays(wqs); } /** * alloc_wqes_shadow - allocate WQE shadows for WQ * @wq: WQ to allocate shadows for * * Return 0 - Success, negative - Failure **/ static int alloc_wqes_shadow(struct hinic_wq *wq) { struct hinic_hwif *hwif = wq->hwif; struct pci_dev *pdev = hwif->pdev; wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages, wq->max_wqe_size, GFP_KERNEL); if (!wq->shadow_wqe) return -ENOMEM; wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages, sizeof(*wq->shadow_idx), GFP_KERNEL); if (!wq->shadow_idx) goto err_shadow_idx; return 0; err_shadow_idx: devm_kfree(&pdev->dev, wq->shadow_wqe); return -ENOMEM; } /** * free_wqes_shadow - free WQE shadows of WQ * @wq: WQ to free shadows from **/ static void free_wqes_shadow(struct hinic_wq *wq) { struct hinic_hwif *hwif = wq->hwif; struct pci_dev *pdev = hwif->pdev; devm_kfree(&pdev->dev, wq->shadow_idx); devm_kfree(&pdev->dev, wq->shadow_wqe); } /** * free_wq_pages - free pages of WQ * @hwif: HW interface for releasing dma addresses * @wq: WQ to free pages from * @num_q_pages: number pages to free **/ static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, int num_q_pages) { struct pci_dev *pdev = hwif->pdev; int i; for (i = 0; i < num_q_pages; i++) { void **vaddr = &wq->shadow_block_vaddr[i]; u64 *paddr = &wq->block_vaddr[i]; dma_addr_t dma_addr; dma_addr = (dma_addr_t)be64_to_cpu(*paddr); dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr, dma_addr); } free_wqes_shadow(wq); } /** * alloc_wq_pages - alloc pages for WQ * @hwif: HW interface for allocating dma addresses * @wq: WQ to allocate pages for * @max_pages: maximum pages allowed * * Return 0 - Success, negative - Failure **/ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, int max_pages) { struct pci_dev *pdev = hwif->pdev; int i, err, num_q_pages; num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; if (num_q_pages > 
max_pages) { dev_err(&pdev->dev, "Number wq pages exceeds the limit\n"); return -EINVAL; } if (num_q_pages & (num_q_pages - 1)) { dev_err(&pdev->dev, "Number wq pages must be power of 2\n"); return -EINVAL; } wq->num_q_pages = num_q_pages; err = alloc_wqes_shadow(wq); if (err) { dev_err(&pdev->dev, "Failed to allocate wqe shadow\n"); return err; } for (i = 0; i < num_q_pages; i++) { void **vaddr = &wq->shadow_block_vaddr[i]; u64 *paddr = &wq->block_vaddr[i]; dma_addr_t dma_addr; *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size, &dma_addr, GFP_KERNEL); if (!*vaddr) { dev_err(&pdev->dev, "Failed to allocate wq page\n"); goto err_alloc_wq_pages; } /* HW uses Big Endian Format */ *paddr = cpu_to_be64(dma_addr); } return 0; err_alloc_wq_pages: free_wq_pages(wq, hwif, i); return -ENOMEM; } /** * hinic_wq_allocate - Allocate the WQ resources from the WQS * @wqs: WQ set from which to allocate the WQ resources * @wq: WQ to allocate resources for it from the WQ set * @wqebb_size: Work Queue Block Byte Size * @wq_page_size: the page size in the Work Queue * @q_depth: number of wqebbs in WQ * @max_wqe_size: maximum WQE size that will be used in the WQ * * Return 0 - Success, negative - Failure **/ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) { struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev; u16 num_wqebbs_per_page; u16 wqebb_size_shift; int err; if (!is_power_of_2(wqebb_size)) { dev_err(&pdev->dev, "wqebb_size must be power of 2\n"); return -EINVAL; } if (wq_page_size == 0) { dev_err(&pdev->dev, "wq_page_size must be > 0\n"); return -EINVAL; } if (q_depth & (q_depth - 1)) { dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); return -EINVAL; } wqebb_size_shift = ilog2(wqebb_size); num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) >> wqebb_size_shift; if (!is_power_of_2(num_wqebbs_per_page)) { dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); return -EINVAL; } wq->hwif = hwif; err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); if (err) { dev_err(&pdev->dev, "Failed to get free wqs next block\n"); return err; } wq->wqebb_size = wqebb_size; wq->wq_page_size = wq_page_size; wq->q_depth = q_depth; wq->max_wqe_size = max_wqe_size; wq->num_wqebbs_per_page = num_wqebbs_per_page; wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page); wq->wqebb_size_shift = wqebb_size_shift; wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); wq->block_paddr = WQ_BASE_PADDR(wqs, wq); err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES); if (err) { dev_err(&pdev->dev, "Failed to allocate wq pages\n"); goto err_alloc_wq_pages; } atomic_set(&wq->cons_idx, 0); atomic_set(&wq->prod_idx, 0); atomic_set(&wq->delta, q_depth); wq->mask = q_depth - 1; return 0; err_alloc_wq_pages: wqs_return_block(wqs, wq->page_idx, wq->block_idx); return err; } /** * hinic_wq_free - Free the WQ resources to the WQS * @wqs: WQ set to free the WQ resources to it * @wq: WQ to free its resources to the WQ set resources **/ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) { free_wq_pages(wq, wqs->hwif, wq->num_q_pages); wqs_return_block(wqs, wq->page_idx, wq->block_idx); } /** * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs * @cmdq_pages: will hold the pages of the cmdq * @wq: returned wqs * @hwif: HW interface * @cmdq_blocks: number of cmdq blocks/wq to allocate * @wqebb_size: Work Queue Block Byte Size * @wq_page_size: the page size in the Work Queue * @q_depth: 
number of wqebbs in WQ * @max_wqe_size: maximum WQE size that will be used in the WQ * * Return 0 - Success, negative - Failure **/ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, struct hinic_wq *wq, struct hinic_hwif *hwif, int cmdq_blocks, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) { struct pci_dev *pdev = hwif->pdev; u16 num_wqebbs_per_page_shift; u16 num_wqebbs_per_page; u16 wqebb_size_shift; int i, j, err = -ENOMEM; if (!is_power_of_2(wqebb_size)) { dev_err(&pdev->dev, "wqebb_size must be power of 2\n"); return -EINVAL; } if (wq_page_size == 0) { dev_err(&pdev->dev, "wq_page_size must be > 0\n"); return -EINVAL; } if (q_depth & (q_depth - 1)) { dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); return -EINVAL; } wqebb_size_shift = ilog2(wqebb_size); num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) >> wqebb_size_shift; if (!is_power_of_2(num_wqebbs_per_page)) { dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); return -EINVAL; } cmdq_pages->hwif = hwif; err = cmdq_allocate_page(cmdq_pages); if (err) { dev_err(&pdev->dev, "Failed to allocate CMDQ page\n"); return err; } num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page); for (i = 0; i < cmdq_blocks; i++) { wq[i].hwif = hwif; wq[i].page_idx = 0; wq[i].block_idx = i; wq[i].wqebb_size = wqebb_size; wq[i].wq_page_size = wq_page_size; wq[i].q_depth = q_depth; wq[i].max_wqe_size = max_wqe_size; wq[i].num_wqebbs_per_page = num_wqebbs_per_page; wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift; wq[i].wqebb_size_shift = wqebb_size_shift; wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); err = alloc_wq_pages(&wq[i], cmdq_pages->hwif, CMDQ_WQ_MAX_PAGES); if (err) { dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n"); goto err_cmdq_block; } atomic_set(&wq[i].cons_idx, 0); atomic_set(&wq[i].prod_idx, 0); atomic_set(&wq[i].delta, q_depth); wq[i].mask = q_depth - 1; } return 0; err_cmdq_block: for (j = 0; j < i; j++) free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages); cmdq_free_page(cmdq_pages); return err; } /** * hinic_wqs_cmdq_free - Free wqs from cmdqs * @cmdq_pages: hold the pages of the cmdq * @wq: wqs to free * @cmdq_blocks: number of wqs to free **/ void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, struct hinic_wq *wq, int cmdq_blocks) { int i; for (i = 0; i < cmdq_blocks; i++) free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages); cmdq_free_page(cmdq_pages); } static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr, int num_wqebbs, u16 idx) { void *wqebb_addr; int i; for (i = 0; i < num_wqebbs; i++, idx++) { idx = MASKED_WQE_IDX(wq, idx); wqebb_addr = WQ_PAGE_ADDR(wq, idx) + WQE_PAGE_OFF(wq, idx); memcpy(shadow_addr, wqebb_addr, wq->wqebb_size); shadow_addr += wq->wqebb_size; } } static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr, int num_wqebbs, u16 idx) { void *wqebb_addr; int i; for (i = 0; i < num_wqebbs; i++, idx++) { idx = MASKED_WQE_IDX(wq, idx); wqebb_addr = WQ_PAGE_ADDR(wq, idx) + WQE_PAGE_OFF(wq, idx); memcpy(wqebb_addr, shadow_addr, wq->wqebb_size); shadow_addr += wq->wqebb_size; } } /** * hinic_get_wqe - get wqe ptr in the current pi and update the pi * @wq: wq to get wqe from * @wqe_size: wqe size * @prod_idx: returned pi * * Return wqe pointer **/ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, u16 *prod_idx) { int curr_pg, end_pg, num_wqebbs; 
u16 curr_prod_idx, end_prod_idx; *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx)); num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift; if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) { atomic_add(num_wqebbs, &wq->delta); return ERR_PTR(-EBUSY); } end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx); end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx); curr_prod_idx = end_prod_idx - num_wqebbs; curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); /* end prod index points to the next wqebb, therefore minus 1 */ end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1); curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); end_pg = WQE_PAGE_NUM(wq, end_prod_idx); *prod_idx = curr_prod_idx; /* Even with a single page, we still need to use the shadow wqe when * the wqe rolls over the page boundary */ if (curr_pg != end_pg || end_prod_idx < *prod_idx) { void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); wq->shadow_idx[curr_pg] = *prod_idx; return shadow_addr; } return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); } /** * hinic_return_wqe - return the wqe when transmit failed * @wq: wq to return wqe * @wqe_size: wqe size **/ void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size) { int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; atomic_sub(num_wqebbs, &wq->prod_idx); atomic_add(num_wqebbs, &wq->delta); } /** * hinic_put_wqe - return the wqe place to use for a new wqe * @wq: wq to return wqe * @wqe_size: wqe size **/ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) { int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift; atomic_add(num_wqebbs, &wq->cons_idx); atomic_add(num_wqebbs, &wq->delta); } /** * hinic_read_wqe - read wqe ptr in the current ci * @wq: wq to get read from * @wqe_size: wqe size * @cons_idx: returned ci * * Return wqe pointer **/ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, u16 *cons_idx) { int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift; u16 curr_cons_idx, end_cons_idx; int curr_pg, end_pg; if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) return ERR_PTR(-EBUSY); curr_cons_idx = atomic_read(&wq->cons_idx); curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); end_pg = WQE_PAGE_NUM(wq, end_cons_idx); *cons_idx = curr_cons_idx; /* Even with a single page, we still need to use the shadow wqe when * the wqe rolls over the page boundary */ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) { void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); return shadow_addr; } return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); } /** * hinic_read_wqe_direct - read wqe directly from ci position * @wq: wq * @cons_idx: ci position * * Return wqe **/ struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx) { return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx); } /** * wqe_shadow - check if a wqe is shadow * @wq: wq of the wqe * @wqe: the wqe for shadow checking * * Return true - shadow, false - Not shadow **/ static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe) { size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size; return WQE_IN_RANGE(wqe, wq->shadow_wqe, &wq->shadow_wqe[wqe_shadow_size]); } /** * hinic_write_wqe - write the wqe to the wq * @wq: wq to
write wqe to * @wqe: wqe to write * @wqe_size: wqe size **/ void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, unsigned int wqe_size) { int curr_pg, num_wqebbs; void *shadow_addr; u16 prod_idx; if (wqe_shadow(wq, wqe)) { curr_pg = WQE_SHADOW_PAGE(wq, wqe); prod_idx = wq->shadow_idx[curr_pg]; num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx); } }
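/*
 * Editor's note: a minimal, self-contained sketch of the ring-index scheme
 * used by hinic_get_wqe()/hinic_put_wqe() above. It is an illustration only:
 * the names below (demo_wq, demo_get, demo_put) are hypothetical and the
 * model is simplified (no pages, no shadow WQEs, no DMA).
 * Compile with: cc -std=c11 demo_ring.c
 */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_DEPTH 8                    /* must be a power of 2 */
#define DEMO_MASK(idx) ((idx) & (DEMO_DEPTH - 1))

struct demo_wq {
	atomic_int prod_idx;            /* where the next entry is written */
	atomic_int cons_idx;            /* where the next entry is consumed */
	atomic_int delta;               /* number of free slots remaining */
};

/* Reserve nslots; returns the masked producer index or -1 when full. */
static int demo_get(struct demo_wq *wq, int nslots)
{
	if (atomic_fetch_sub(&wq->delta, nslots) - nslots <= 0) {
		atomic_fetch_add(&wq->delta, nslots);   /* roll back */
		return -1;
	}
	/* fetch_add returns the old value, i.e. the start of our range */
	return DEMO_MASK(atomic_fetch_add(&wq->prod_idx, nslots));
}

/* Release nslots once the consumer is done with them. */
static void demo_put(struct demo_wq *wq, int nslots)
{
	atomic_fetch_add(&wq->cons_idx, nslots);
	atomic_fetch_add(&wq->delta, nslots);
}

int main(void)
{
	struct demo_wq wq = { 0, 0, DEMO_DEPTH };
	int idx;

	while ((idx = demo_get(&wq, 2)) >= 0)
		printf("reserved 2 slots at index %d\n", idx);
	printf("queue full, free slots left: %d\n", atomic_load(&wq.delta));
	demo_put(&wq, 2);
	printf("after put, free slots: %d\n", atomic_load(&wq.delta));
	return 0;
}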
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
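/*
 * Editor's note: the hinic_hw_wq.c code above gathers a WQE that straddles a
 * page boundary into a contiguous "shadow" buffer before handing it out
 * (copy_wqe_to_shadow()/copy_wqe_from_shadow()). Below is a stand-alone
 * sketch of that gather/scatter idea under simplified, hypothetical
 * assumptions: fixed-size blocks, two pages, no DMA, made-up names.
 */
#include <stdio.h>
#include <string.h>

#define BLK_SIZE      16                /* one "wqebb" in the real driver */
#define BLKS_PER_PAGE 4
#define NUM_PAGES     2
#define NUM_BLKS      (BLKS_PER_PAGE * NUM_PAGES)
#define MASK(idx)     ((idx) & (NUM_BLKS - 1))

static char pages[NUM_PAGES][BLKS_PER_PAGE * BLK_SIZE];

static char *blk_addr(int idx)
{
	return &pages[idx / BLKS_PER_PAGE][(idx % BLKS_PER_PAGE) * BLK_SIZE];
}

/* Gather nblks starting at idx (with wrap-around) into shadow. */
static void gather_to_shadow(char *shadow, int nblks, int idx)
{
	for (int i = 0; i < nblks; i++, idx++)
		memcpy(shadow + i * BLK_SIZE, blk_addr(MASK(idx)), BLK_SIZE);
}

/* Scatter the shadow copy back to its blocks once it has been filled in. */
static void scatter_from_shadow(const char *shadow, int nblks, int idx)
{
	for (int i = 0; i < nblks; i++, idx++)
		memcpy(blk_addr(MASK(idx)), shadow + i * BLK_SIZE, BLK_SIZE);
}

int main(void)
{
	char shadow[2 * BLK_SIZE];

	/* An entry that starts in the last block of page 0 spans two pages. */
	gather_to_shadow(shadow, 2, BLKS_PER_PAGE - 1);
	memset(shadow, 0xab, sizeof(shadow));       /* "write" the entry */
	scatter_from_shadow(shadow, 2, BLKS_PER_PAGE - 1);
	printf("block %d first byte: 0x%02x\n", BLKS_PER_PAGE - 1,
	       (unsigned char)*blk_addr(BLKS_PER_PAGE - 1));
	return 0;
}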
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/semaphore.h> #include <linux/completion.h> #include <linux/slab.h> #include <net/devlink.h> #include <asm/barrier.h> #include "hinic_devlink.h" #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_api_cmd.h" #include "hinic_hw_mgmt.h" #include "hinic_hw_dev.h" #define SYNC_MSG_ID_MASK 0x1FF #define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) #define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \ SYNC_MSG_ID_MASK)) #define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN) #define MGMT_MSG_LEN_MIN 20 #define MGMT_MSG_LEN_STEP 16 #define MGMT_MSG_RSVD_FOR_DEV 8 #define SEGMENT_LEN 48 #define MAX_PF_MGMT_BUF_SIZE 2048 /* Data should be SEG LEN size aligned */ #define MAX_MSG_LEN 2016 #define MSG_NOT_RESP 0xFFFF #define MGMT_MSG_TIMEOUT 5000 #define SET_FUNC_PORT_MBOX_TIMEOUT 30000 #define SET_FUNC_PORT_MGMT_TIMEOUT 25000 #define UPDATE_FW_MGMT_TIMEOUT 20000 #define mgmt_to_pfhwdev(pf_mgmt) \ container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) enum msg_segment_type { NOT_LAST_SEGMENT = 0, LAST_SEGMENT = 1, }; enum mgmt_direction_type { MGMT_DIRECT_SEND = 0, MGMT_RESP = 1, }; enum msg_ack_type { MSG_ACK = 0, MSG_NO_ACK = 1, }; /** * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module * @pf_to_mgmt: PF to MGMT channel * @mod: module in the chip that this handler will handle its messages * @handle: private data for the callback * @callback: the handler that will handle messages **/ void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, enum hinic_mod_type mod, void *handle, void (*callback)(void *handle, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size)) { struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; mgmt_cb->cb = callback; mgmt_cb->handle = handle; mgmt_cb->state = HINIC_MGMT_CB_ENABLED; } /** * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module * @pf_to_mgmt: PF to MGMT channel * @mod: module in the chip that this handler handles its messages **/ void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, enum hinic_mod_type mod) { struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED; while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING) schedule(); mgmt_cb->cb = NULL; } /** * prepare_header - prepare the header of the message * @pf_to_mgmt: PF to MGMT channel * @msg_len: the length of the message * @mod: module in the chip that will get the message * @ack_type: ask for response * @direction: the direction of the message * @cmd: command of the message * @msg_id: message id * * Return the prepared header value **/ static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt, u16 msg_len, enum hinic_mod_type mod, enum msg_ack_type ack_type, enum mgmt_direction_type direction, u16 cmd, u16 msg_id) { struct hinic_hwif *hwif = pf_to_mgmt->hwif; return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | HINIC_MSG_HEADER_SET(mod, MODULE) | HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) | HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | HINIC_MSG_HEADER_SET(0, SEQID) | HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | HINIC_MSG_HEADER_SET(direction, DIRECTION) | HINIC_MSG_HEADER_SET(cmd, CMD) | 
HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) | HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) | HINIC_MSG_HEADER_SET(msg_id, MSG_ID); } /** * prepare_mgmt_cmd - prepare the mgmt command * @mgmt_cmd: pointer to the command to prepare * @header: pointer of the header for the message * @msg: the data of the message * @msg_len: the length of the message **/ static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len) { memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; memcpy(mgmt_cmd, header, sizeof(*header)); mgmt_cmd += sizeof(*header); memcpy(mgmt_cmd, msg, msg_len); } /** * mgmt_msg_len - calculate the total message length * @msg_data_len: the length of the message data * * Return the total message length **/ static u16 mgmt_msg_len(u16 msg_data_len) { /* RSVD + HEADER_SIZE + DATA_LEN */ u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len; if (msg_len > MGMT_MSG_LEN_MIN) msg_len = MGMT_MSG_LEN_MIN + ALIGN((msg_len - MGMT_MSG_LEN_MIN), MGMT_MSG_LEN_STEP); else msg_len = MGMT_MSG_LEN_MIN; return msg_len; } /** * send_msg_to_mgmt - send message to mgmt by API CMD * @pf_to_mgmt: PF to MGMT channel * @mod: module in the chip that will get the message * @cmd: command of the message * @data: the msg data * @data_len: the msg data length * @ack_type: ask for response * @direction: the direction of the original message * @resp_msg_id: msg id to response for * * Return 0 - Success, negative - Failure **/ static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, enum hinic_mod_type mod, u8 cmd, u8 *data, u16 data_len, enum msg_ack_type ack_type, enum mgmt_direction_type direction, u16 resp_msg_id) { struct hinic_api_cmd_chain *chain; u64 header; u16 msg_id; msg_id = SYNC_MSG_ID(pf_to_mgmt); if (direction == MGMT_RESP) { header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, direction, cmd, resp_msg_id); } else { SYNC_MSG_ID_INC(pf_to_mgmt); header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, direction, cmd, msg_id); } prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len); chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU]; return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT, pf_to_mgmt->sync_msg_buf, mgmt_msg_len(data_len)); } /** * msg_to_mgmt_sync - send sync message to mgmt * @pf_to_mgmt: PF to MGMT channel * @mod: module in the chip that will get the message * @cmd: command of the message * @buf_in: the msg data * @in_size: the msg data length * @buf_out: response * @out_size: response length * @direction: the direction of the original message * @resp_msg_id: msg id to response for * @timeout: time-out period of waiting for response * * Return 0 - Success, negative - Failure **/ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, enum hinic_mod_type mod, u8 cmd, u8 *buf_in, u16 in_size, u8 *buf_out, u16 *out_size, enum mgmt_direction_type direction, u16 resp_msg_id, u32 timeout) { struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_recv_msg *recv_msg; struct completion *recv_done; unsigned long timeo; u16 msg_id; int err; /* Lock the sync_msg_buf */ down(&pf_to_mgmt->sync_msg_lock); recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; recv_done = &recv_msg->recv_done; if (resp_msg_id == MSG_NOT_RESP) msg_id = SYNC_MSG_ID(pf_to_mgmt); else msg_id = resp_msg_id; init_completion(recv_done); err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size, MSG_ACK, direction, resp_msg_id); if (err) { dev_err(&pdev->dev, "Failed to 
send sync msg to mgmt\n"); goto unlock_sync_msg; } timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT); if (!wait_for_completion_timeout(recv_done, timeo)) { dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); hinic_dump_aeq_info(pf_to_mgmt->hwdev); err = -ETIMEDOUT; goto unlock_sync_msg; } smp_rmb(); /* verify reading after completion */ if (recv_msg->msg_id != msg_id) { dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id); err = -EFAULT; goto unlock_sync_msg; } if (buf_out && recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE) { memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); *out_size = recv_msg->msg_len; } unlock_sync_msg: up(&pf_to_mgmt->sync_msg_lock); return err; } /** * msg_to_mgmt_async - send message to mgmt without response * @pf_to_mgmt: PF to MGMT channel * @mod: module in the chip that will get the message * @cmd: command of the message * @buf_in: the msg data * @in_size: the msg data length * @direction: the direction of the original message * @resp_msg_id: msg id to response for * * Return 0 - Success, negative - Failure **/ static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt, enum hinic_mod_type mod, u8 cmd, u8 *buf_in, u16 in_size, enum mgmt_direction_type direction, u16 resp_msg_id) { int err; /* Lock the sync_msg_buf */ down(&pf_to_mgmt->sync_msg_lock); err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size, MSG_NO_ACK, direction, resp_msg_id); up(&pf_to_mgmt->sync_msg_lock); return err; } /** * hinic_msg_to_mgmt - send message to mgmt * @pf_to_mgmt: PF to MGMT channel * @mod: module in the chip that will get the message * @cmd: command of the message * @buf_in: the msg data * @in_size: the msg data length * @buf_out: response * @out_size: returned response length * @sync: sync msg or async msg * * Return 0 - Success, negative - Failure **/ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, enum hinic_mod_type mod, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, enum hinic_mgmt_msg_type sync) { struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; u32 timeout = 0; if (sync != HINIC_MGMT_MSG_SYNC) { dev_err(&pdev->dev, "Invalid MGMT msg type\n"); return -EINVAL; } if (!MSG_SZ_IS_VALID(in_size)) { dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n"); return -EINVAL; } if (HINIC_IS_VF(hwif)) { if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE) timeout = SET_FUNC_PORT_MBOX_TIMEOUT; return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in, in_size, buf_out, out_size, timeout); } else { if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE) timeout = SET_FUNC_PORT_MGMT_TIMEOUT; else if (cmd == HINIC_PORT_CMD_UPDATE_FW) timeout = UPDATE_FW_MGMT_TIMEOUT; return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, buf_out, out_size, MGMT_DIRECT_SEND, MSG_NOT_RESP, timeout); } } static void recv_mgmt_msg_work_handler(struct work_struct *work) { struct hinic_mgmt_msg_handle_work *mgmt_work = container_of(work, struct hinic_mgmt_msg_handle_work, work); struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt; struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; u8 *buf_out = pf_to_mgmt->mgmt_ack_buf; struct hinic_mgmt_cb *mgmt_cb; unsigned long cb_state; u16 out_size = 0; memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); if (mgmt_work->mod >= HINIC_MOD_MAX) { dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", mgmt_work->mod); kfree(mgmt_work->msg); kfree(mgmt_work); return; } mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod]; cb_state = cmpxchg(&mgmt_cb->state, HINIC_MGMT_CB_ENABLED, HINIC_MGMT_CB_ENABLED | 
HINIC_MGMT_CB_RUNNING); if (cb_state == HINIC_MGMT_CB_ENABLED && mgmt_cb->cb) mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd, mgmt_work->msg, mgmt_work->msg_len, buf_out, &out_size); else dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n", mgmt_work->mod, mgmt_work->cmd); mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; if (!mgmt_work->async_mgmt_to_pf) /* MGMT sent sync msg, send the response */ msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd, buf_out, out_size, MGMT_RESP, mgmt_work->msg_id); kfree(mgmt_work->msg); kfree(mgmt_work); } /** * mgmt_recv_msg_handler - handler for message from mgmt cpu * @pf_to_mgmt: PF to MGMT channel * @recv_msg: received message details **/ static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, struct hinic_recv_msg *recv_msg) { struct hinic_mgmt_msg_handle_work *mgmt_work = NULL; mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); if (!mgmt_work) return; if (recv_msg->msg_len) { mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); if (!mgmt_work->msg) { kfree(mgmt_work); return; } } mgmt_work->pf_to_mgmt = pf_to_mgmt; mgmt_work->msg_len = recv_msg->msg_len; memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); mgmt_work->msg_id = recv_msg->msg_id; mgmt_work->mod = recv_msg->mod; mgmt_work->cmd = recv_msg->cmd; mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); queue_work(pf_to_mgmt->workq, &mgmt_work->work); } /** * mgmt_resp_msg_handler - handler for a response message from mgmt cpu * @pf_to_mgmt: PF to MGMT channel * @recv_msg: received message details **/ static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, struct hinic_recv_msg *recv_msg) { wmb(); /* verify writing all, before reading */ complete(&recv_msg->recv_done); } /** * recv_mgmt_msg_handler - handler for a message from mgmt cpu * @pf_to_mgmt: PF to MGMT channel * @header: the header of the message * @recv_msg: received message details **/ static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, u64 *header, struct hinic_recv_msg *recv_msg) { struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; int seq_id, seg_len; u8 *msg_body; seq_id = HINIC_MSG_HEADER_GET(*header, SEQID); seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN); if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) { dev_err(&pdev->dev, "recv big mgmt msg\n"); return; } msg_body = (u8 *)header + sizeof(*header); memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len); if (!HINIC_MSG_HEADER_GET(*header, LAST)) return; recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD); recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE); recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header, ASYNC_MGMT_TO_PF); recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN); recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID); if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP) mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); else mgmt_recv_msg_handler(pf_to_mgmt, recv_msg); } /** * mgmt_msg_aeqe_handler - handler for a mgmt message event * @handle: PF to MGMT channel * @data: the header of the message * @size: unused **/ static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size) { struct hinic_pf_to_mgmt *pf_to_mgmt = handle; struct hinic_recv_msg *recv_msg; u64 *header = (u64 *)data; recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_DIRECT_SEND ? 
&pf_to_mgmt->recv_msg_from_mgmt : &pf_to_mgmt->recv_resp_msg_from_mgmt; recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); } /** * alloc_recv_msg - allocate receive message memory * @pf_to_mgmt: PF to MGMT channel * @recv_msg: pointer that will hold the allocated data * * Return 0 - Success, negative - Failure **/ static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt, struct hinic_recv_msg *recv_msg) { struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); if (!recv_msg->msg) return -ENOMEM; recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); if (!recv_msg->buf_out) return -ENOMEM; return 0; } /** * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel * @pf_to_mgmt: PF to MGMT channel * * Return 0 - Success, negative - Failure **/ static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt) { struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; int err; err = alloc_recv_msg(pf_to_mgmt, &pf_to_mgmt->recv_msg_from_mgmt); if (err) { dev_err(&pdev->dev, "Failed to allocate recv msg\n"); return err; } err = alloc_recv_msg(pf_to_mgmt, &pf_to_mgmt->recv_resp_msg_from_mgmt); if (err) { dev_err(&pdev->dev, "Failed to allocate resp recv msg\n"); return err; } pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); if (!pf_to_mgmt->sync_msg_buf) return -ENOMEM; pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); if (!pf_to_mgmt->mgmt_ack_buf) return -ENOMEM; return 0; } /** * hinic_pf_to_mgmt_init - initialize PF to MGMT channel * @pf_to_mgmt: PF to MGMT channel * @hwif: HW interface the PF to MGMT will use for accessing HW * * Return 0 - Success, negative - Failure **/ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, struct hinic_hwif *hwif) { struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); struct hinic_hwdev *hwdev = &pfhwdev->hwdev; struct pci_dev *pdev = hwif->pdev; int err; pf_to_mgmt->hwif = hwif; pf_to_mgmt->hwdev = hwdev; if (HINIC_IS_VF(hwif)) return 0; err = hinic_health_reporters_create(hwdev->devlink_dev); if (err) return err; sema_init(&pf_to_mgmt->sync_msg_lock, 1); pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt"); if (!pf_to_mgmt->workq) { dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n"); hinic_health_reporters_destroy(hwdev->devlink_dev); return -ENOMEM; } pf_to_mgmt->sync_msg_id = 0; err = alloc_msg_buf(pf_to_mgmt); if (err) { dev_err(&pdev->dev, "Failed to allocate msg buffers\n"); destroy_workqueue(pf_to_mgmt->workq); hinic_health_reporters_destroy(hwdev->devlink_dev); return err; } err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif); if (err) { dev_err(&pdev->dev, "Failed to initialize cmd chains\n"); destroy_workqueue(pf_to_mgmt->workq); hinic_health_reporters_destroy(hwdev->devlink_dev); return err; } hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU, pf_to_mgmt, mgmt_msg_aeqe_handler); return 0; } /** * hinic_pf_to_mgmt_free - free PF to MGMT channel * @pf_to_mgmt: PF to MGMT channel **/ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) { struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); struct hinic_hwdev *hwdev = &pfhwdev->hwdev; if (HINIC_IS_VF(hwdev->hwif)) return; hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); hinic_api_cmd_free(pf_to_mgmt->cmd_chain); destroy_workqueue(pf_to_mgmt->workq); 
hinic_health_reporters_destroy(hwdev->devlink_dev); }
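/*
 * Editor's note: prepare_header() above packs a dozen fields into one u64
 * with HINIC_MSG_HEADER_SET(). The macros below are a hypothetical,
 * simplified re-creation of that shift-and-mask pattern; the field widths
 * and names are illustrative, not the real register layout.
 */
#include <stdint.h>
#include <stdio.h>

#define HDR_MSG_LEN_SHIFT 0
#define HDR_MSG_LEN_MASK  0x7FF
#define HDR_MODULE_SHIFT  11
#define HDR_MODULE_MASK   0x1F
#define HDR_CMD_SHIFT     16
#define HDR_CMD_MASK      0xFF
#define HDR_MSG_ID_SHIFT  24
#define HDR_MSG_ID_MASK   0x1FF

#define HDR_SET(val, field) \
	(((uint64_t)(val) & HDR_##field##_MASK) << HDR_##field##_SHIFT)
#define HDR_GET(hdr, field) \
	(((hdr) >> HDR_##field##_SHIFT) & HDR_##field##_MASK)

int main(void)
{
	uint64_t header = HDR_SET(64, MSG_LEN) |
			  HDR_SET(3, MODULE)  |
			  HDR_SET(0x21, CMD)  |
			  HDR_SET(130, MSG_ID);

	printf("len=%llu mod=%llu cmd=0x%llx id=%llu\n",
	       (unsigned long long)HDR_GET(header, MSG_LEN),
	       (unsigned long long)HDR_GET(header, MODULE),
	       (unsigned long long)HDR_GET(header, CMD),
	       (unsigned long long)HDR_GET(header, MSG_ID));
	return 0;
}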
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
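/*
 * Editor's note: mgmt_msg_len() in the hinic_hw_mgmt.c code above pads the
 * on-wire length to a minimum and then rounds up in fixed steps. A
 * stand-alone sketch of that rounding; the constants mirror the driver's
 * MGMT_MSG_* defines, but the helper name round_msg_len is made up here.
 */
#include <stdio.h>

#define RSVD_FOR_DEV 8
#define HEADER_SIZE  8
#define LEN_MIN      20
#define LEN_STEP     16
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

static unsigned int round_msg_len(unsigned int data_len)
{
	unsigned int len = RSVD_FOR_DEV + HEADER_SIZE + data_len;

	if (len > LEN_MIN)
		return LEN_MIN + ALIGN_UP(len - LEN_MIN, LEN_STEP);
	return LEN_MIN;
}

int main(void)
{
	/* 0 data bytes -> 20, 10 bytes -> 20 + ALIGN_UP(6, 16) = 36, ... */
	for (unsigned int d = 0; d <= 40; d += 10)
		printf("data %2u -> msg len %u\n", d, round_msg_len(d));
	return 0;
}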
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/bitops.h> #include <linux/err.h> #include <linux/jiffies.h> #include <linux/delay.h> #include <linux/log2.h> #include <linux/semaphore.h> #include <asm/byteorder.h> #include <asm/barrier.h> #include "hinic_hw_csr.h" #include "hinic_hw_if.h" #include "hinic_hw_api_cmd.h" #define API_CHAIN_NUM_CELLS 32 #define API_CMD_CELL_SIZE_SHIFT 6 #define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT)) #define API_CMD_CELL_SIZE(cell_size) \ (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? \ (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN) #define API_CMD_CELL_SIZE_VAL(size) \ ilog2((size) >> API_CMD_CELL_SIZE_SHIFT) #define API_CMD_BUF_SIZE 2048 /* Sizes of the members in hinic_api_cmd_cell */ #define API_CMD_CELL_DESC_SIZE 8 #define API_CMD_CELL_DATA_ADDR_SIZE 8 #define API_CMD_CELL_ALIGNMENT 8 #define API_CMD_TIMEOUT 1000 #define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) #define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3) #define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2) #define RD_DMA_ATTR_DEFAULT 0 #define WR_DMA_ATTR_DEFAULT 0 enum api_cmd_data_format { SGE_DATA = 1, /* cell data is passed by hw address */ }; enum api_cmd_type { API_CMD_WRITE = 0, }; enum api_cmd_bypass { NO_BYPASS = 0, BYPASS = 1, }; enum api_cmd_xor_chk_level { XOR_CHK_DIS = 0, XOR_CHK_ALL = 3, }; static u8 xor_chksum_set(void *data) { int idx; u8 *val, checksum = 0; val = data; for (idx = 0; idx < 7; idx++) checksum ^= val[idx]; return checksum; } static void set_prod_idx(struct hinic_api_cmd_chain *chain) { enum hinic_api_cmd_chain_type chain_type = chain->chain_type; struct hinic_hwif *hwif = chain->hwif; u32 addr, prod_idx; addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); prod_idx = hinic_hwif_read_reg(hwif, addr); prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX); prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX); hinic_hwif_write_reg(hwif, addr, prod_idx); } static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain) { u32 addr, val; addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type); val = hinic_hwif_read_reg(chain->hwif, addr); return HINIC_API_CMD_STATUS_GET(val, CONS_IDX); } static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain) { u32 addr, val; addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type); val = hinic_hwif_read_reg(chain->hwif, addr); dev_err(&chain->hwif->pdev->dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", chain->chain_type, HINIC_API_CMD_STATUS_GET(val, CPLD_ERR), HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR), HINIC_API_CMD_STATUS_GET(val, FSM)); dev_err(&chain->hwif->pdev->dev, "Chain hw current ci: 0x%x\n", HINIC_API_CMD_STATUS_GET(val, CONS_IDX)); addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); val = hinic_hwif_read_reg(chain->hwif, addr); dev_err(&chain->hwif->pdev->dev, "Chain hw current pi: 0x%x\n", val); } /** * chain_busy - check if the chain is still processing last requests * @chain: chain to check * * Return 0 - Success, negative - Failure **/ static int chain_busy(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; u32 prod_idx; switch (chain->chain_type) { case HINIC_API_CMD_WRITE_TO_MGMT_CPU: chain->cons_idx = 
get_hw_cons_idx(chain); prod_idx = chain->prod_idx; /* check for a space for a new command */ if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) { dev_err(&pdev->dev, "API CMD chain %d is busy, cons_idx: %d, prod_idx: %d\n", chain->chain_type, chain->cons_idx, chain->prod_idx); dump_api_chain_reg(chain); return -EBUSY; } break; default: dev_err(&pdev->dev, "Unknown API CMD Chain type\n"); break; } return 0; } /** * get_cell_data_size - get the data size of a specific cell type * @type: chain type * * Return the data(Desc + Address) size in the cell **/ static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type) { u8 cell_data_size = 0; switch (type) { case HINIC_API_CMD_WRITE_TO_MGMT_CPU: cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + API_CMD_CELL_DATA_ADDR_SIZE, API_CMD_CELL_ALIGNMENT); break; default: break; } return cell_data_size; } /** * prepare_cell_ctrl - prepare the ctrl of the cell for the command * @cell_ctrl: the control of the cell to set the control value into it * @data_size: the size of the data in the cell **/ static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size) { u8 chksum; u64 ctrl; ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) | HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) | HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR); chksum = xor_chksum_set(&ctrl); ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); /* The data in the HW should be in Big Endian Format */ *cell_ctrl = cpu_to_be64(ctrl); } /** * prepare_api_cmd - prepare API CMD command * @chain: chain for the command * @dest: destination node on the card that will receive the command * @cmd: command data * @cmd_size: the command size **/ static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, enum hinic_node_id dest, void *cmd, u16 cmd_size) { struct hinic_api_cmd_cell *cell = chain->curr_node; struct hinic_api_cmd_cell_ctxt *cell_ctxt; struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; switch (chain->chain_type) { case HINIC_API_CMD_WRITE_TO_MGMT_CPU: cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) | HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) | HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS); break; default: dev_err(&pdev->dev, "unknown Chain type\n"); return; } cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) | HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), XOR_CHKSUM); /* The data in the HW should be in Big Endian Format */ cell->desc = cpu_to_be64(cell->desc); memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); } /** * prepare_cell - prepare cell ctrl and cmd in the current cell * @chain: chain for the command * @dest: destination node on the card that will receive the command * @cmd: command data * @cmd_size: the command size * * Return 0 - Success, negative - Failure **/ static void prepare_cell(struct hinic_api_cmd_chain *chain, enum hinic_node_id dest, void *cmd, u16 cmd_size) { struct hinic_api_cmd_cell *curr_node = chain->curr_node; u16 data_size = get_cell_data_size(chain->chain_type); prepare_cell_ctrl(&curr_node->ctrl, data_size); prepare_api_cmd(chain, dest, cmd, cmd_size); } static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain) { chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); } /** * api_cmd_status_update - update the status in the chain struct * @chain: chain to update **/ static void api_cmd_status_update(struct hinic_api_cmd_chain 
*chain) { enum hinic_api_cmd_chain_type chain_type; struct hinic_api_cmd_status *wb_status; struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; u64 status_header; u32 status; wb_status = chain->wb_status; status_header = be64_to_cpu(wb_status->header); status = be32_to_cpu(wb_status->status); if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) { dev_err(&pdev->dev, "API CMD status: Xor check error\n"); return; } chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); if (chain_type >= HINIC_API_CMD_MAX) { dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type); return; } chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX); } /** * wait_for_status_poll - wait for write to api cmd command to complete * @chain: the chain of the command * * Return 0 - Success, negative - Failure **/ static int wait_for_status_poll(struct hinic_api_cmd_chain *chain) { int err = -ETIMEDOUT; unsigned long end; end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); do { api_cmd_status_update(chain); /* wait for CI to be updated - sign for completion */ if (chain->cons_idx == chain->prod_idx) { err = 0; break; } msleep(20); } while (time_before(jiffies, end)); return err; } /** * wait_for_api_cmd_completion - wait for command to complete * @chain: chain for the command * * Return 0 - Success, negative - Failure **/ static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; int err; switch (chain->chain_type) { case HINIC_API_CMD_WRITE_TO_MGMT_CPU: err = wait_for_status_poll(chain); if (err) { dev_err(&pdev->dev, "API CMD Poll status timeout\n"); dump_api_chain_reg(chain); break; } break; default: dev_err(&pdev->dev, "unknown API CMD Chain type\n"); err = -EINVAL; break; } return err; } /** * api_cmd - API CMD command * @chain: chain for the command * @dest: destination node on the card that will receive the command * @cmd: command data * @cmd_size: the command size * * Return 0 - Success, negative - Failure **/ static int api_cmd(struct hinic_api_cmd_chain *chain, enum hinic_node_id dest, u8 *cmd, u16 cmd_size) { struct hinic_api_cmd_cell_ctxt *ctxt; int err; down(&chain->sem); if (chain_busy(chain)) { up(&chain->sem); return -EBUSY; } prepare_cell(chain, dest, cmd, cmd_size); cmd_chain_prod_idx_inc(chain); wmb(); /* inc pi before issue the command */ set_prod_idx(chain); /* issue the command */ ctxt = &chain->cell_ctxt[chain->prod_idx]; chain->curr_node = ctxt->cell_vaddr; err = wait_for_api_cmd_completion(chain); up(&chain->sem); return err; } /** * hinic_api_cmd_write - Write API CMD command * @chain: chain for write command * @dest: destination node on the card that will receive the command * @cmd: command data * @size: the command size * * Return 0 - Success, negative - Failure **/ int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, enum hinic_node_id dest, u8 *cmd, u16 size) { /* Verify the chain type */ if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU) return api_cmd(chain, dest, cmd, size); return -EINVAL; } /** * api_cmd_hw_restart - restart the chain in the HW * @chain: the API CMD specific chain to restart * * Return 0 - Success, negative - Failure **/ static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; int err = -ETIMEDOUT; unsigned long end; u32 reg_addr, val; /* Read Modify Write */ reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type); val = hinic_hwif_read_reg(hwif, reg_addr); val = 
HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART); hinic_hwif_write_reg(hwif, reg_addr, val); end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); do { val = hinic_hwif_read_reg(hwif, reg_addr); if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) { err = 0; break; } msleep(20); } while (time_before(jiffies, end)); return err; } /** * api_cmd_ctrl_init - set the control register of a chain * @chain: the API CMD specific chain to set control register for **/ static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; u32 addr, ctrl; u16 cell_size; /* Read Modify Write */ addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size); ctrl = hinic_hwif_read_reg(hwif, addr); ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) | HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) | HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE); hinic_hwif_write_reg(hwif, addr, ctrl); } /** * api_cmd_set_status_addr - set the status address of a chain in the HW * @chain: the API CMD specific chain to set in HW status address for **/ static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; u32 addr, val; addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); val = upper_32_bits(chain->wb_status_paddr); hinic_hwif_write_reg(hwif, addr, val); addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); val = lower_32_bits(chain->wb_status_paddr); hinic_hwif_write_reg(hwif, addr, val); } /** * api_cmd_set_num_cells - set the number cells of a chain in the HW * @chain: the API CMD specific chain to set in HW the number of cells for **/ static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; u32 addr, val; addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); val = chain->num_cells; hinic_hwif_write_reg(hwif, addr, val); } /** * api_cmd_head_init - set the head of a chain in the HW * @chain: the API CMD specific chain to set in HW the head for **/ static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; u32 addr, val; addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); val = upper_32_bits(chain->head_cell_paddr); hinic_hwif_write_reg(hwif, addr, val); addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); val = lower_32_bits(chain->head_cell_paddr); hinic_hwif_write_reg(hwif, addr, val); } /** * api_cmd_chain_hw_clean - clean the HW * @chain: the API CMD specific chain **/ static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; u32 addr, ctrl; addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); ctrl = hinic_hwif_read_reg(hwif, addr); ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); hinic_hwif_write_reg(hwif, addr, ctrl); } /** * api_cmd_chain_hw_init - initialize the chain in the HW * @chain: the API CMD specific chain to initialize in HW * * Return 0 - Success, negative 
- Failure **/ static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; int err; api_cmd_chain_hw_clean(chain); api_cmd_set_status_addr(chain); err = api_cmd_hw_restart(chain); if (err) { dev_err(&pdev->dev, "Failed to restart API CMD HW\n"); return err; } api_cmd_ctrl_init(chain); api_cmd_set_num_cells(chain); api_cmd_head_init(chain); return 0; } /** * free_cmd_buf - free the dma buffer of API CMD command * @chain: the API CMD specific chain of the cmd * @cell_idx: the cell index of the cmd **/ static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx) { struct hinic_api_cmd_cell_ctxt *cell_ctxt; struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; cell_ctxt = &chain->cell_ctxt[cell_idx]; dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE, cell_ctxt->api_cmd_vaddr, cell_ctxt->api_cmd_paddr); } /** * alloc_cmd_buf - allocate a dma buffer for API CMD command * @chain: the API CMD specific chain for the cmd * @cell: the cell in the HW for the cmd * @cell_idx: the index of the cell * * Return 0 - Success, negative - Failure **/ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, struct hinic_api_cmd_cell *cell, int cell_idx) { struct hinic_api_cmd_cell_ctxt *cell_ctxt; struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; dma_addr_t cmd_paddr; u8 *cmd_vaddr; int err = 0; cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, &cmd_paddr, GFP_KERNEL); if (!cmd_vaddr) return -ENOMEM; cell_ctxt = &chain->cell_ctxt[cell_idx]; cell_ctxt->api_cmd_vaddr = cmd_vaddr; cell_ctxt->api_cmd_paddr = cmd_paddr; /* set the cmd DMA address in the cell */ switch (chain->chain_type) { case HINIC_API_CMD_WRITE_TO_MGMT_CPU: /* The data in the HW should be in Big Endian Format */ cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr); break; default: dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); free_cmd_buf(chain, cell_idx); err = -EINVAL; break; } return err; } /** * api_cmd_create_cell - create API CMD cell for specific chain * @chain: the API CMD specific chain to create its cell * @cell_idx: the index of the cell to create * @pre_node: previous cell * @node_vaddr: the returned virt addr of the cell * * Return 0 - Success, negative - Failure **/ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, int cell_idx, struct hinic_api_cmd_cell *pre_node, struct hinic_api_cmd_cell **node_vaddr) { struct hinic_api_cmd_cell_ctxt *cell_ctxt; struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_api_cmd_cell *node; dma_addr_t node_paddr; int err; node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr, GFP_KERNEL); if (!node) return -ENOMEM; node->read.hw_wb_resp_paddr = 0; cell_ctxt = &chain->cell_ctxt[cell_idx]; cell_ctxt->cell_vaddr = node; cell_ctxt->cell_paddr = node_paddr; if (!pre_node) { chain->head_cell_paddr = node_paddr; chain->head_node = node; } else { /* The data in the HW should be in Big Endian Format */ pre_node->next_cell_paddr = cpu_to_be64(node_paddr); } switch (chain->chain_type) { case HINIC_API_CMD_WRITE_TO_MGMT_CPU: err = alloc_cmd_buf(chain, node, cell_idx); if (err) { dev_err(&pdev->dev, "Failed to allocate cmd buffer\n"); goto err_alloc_cmd_buf; } break; default: dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); err = -EINVAL; goto err_alloc_cmd_buf; } *node_vaddr = node; return 0; err_alloc_cmd_buf: dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr); return 
err; } /** * api_cmd_destroy_cell - destroy API CMD cell of specific chain * @chain: the API CMD specific chain to destroy its cell * @cell_idx: the cell to destroy **/ static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain, int cell_idx) { struct hinic_api_cmd_cell_ctxt *cell_ctxt; struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_api_cmd_cell *node; dma_addr_t node_paddr; size_t node_size; cell_ctxt = &chain->cell_ctxt[cell_idx]; node = cell_ctxt->cell_vaddr; node_paddr = cell_ctxt->cell_paddr; node_size = chain->cell_size; if (cell_ctxt->api_cmd_vaddr) { switch (chain->chain_type) { case HINIC_API_CMD_WRITE_TO_MGMT_CPU: free_cmd_buf(chain, cell_idx); break; default: dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); break; } dma_free_coherent(&pdev->dev, node_size, node, node_paddr); } } /** * api_cmd_destroy_cells - destroy API CMD cells of specific chain * @chain: the API CMD specific chain to destroy its cells * @num_cells: number of cells to destroy **/ static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain, int num_cells) { int cell_idx; for (cell_idx = 0; cell_idx < num_cells; cell_idx++) api_cmd_destroy_cell(chain, cell_idx); } /** * api_cmd_create_cells - create API CMD cells for specific chain * @chain: the API CMD specific chain * * Return 0 - Success, negative - Failure **/ static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain) { struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL; struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; int err, cell_idx; for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); if (err) { dev_err(&pdev->dev, "Failed to create API CMD cell\n"); goto err_create_cell; } pre_node = node; } /* set the Final node to point on the start */ node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); /* set the current node to be the head */ chain->curr_node = chain->head_node; return 0; err_create_cell: api_cmd_destroy_cells(chain, cell_idx); return err; } /** * api_chain_init - initialize API CMD specific chain * @chain: the API CMD specific chain to initialize * @attr: attributes to set in the chain * * Return 0 - Success, negative - Failure **/ static int api_chain_init(struct hinic_api_cmd_chain *chain, struct hinic_api_cmd_chain_attr *attr) { struct hinic_hwif *hwif = attr->hwif; struct pci_dev *pdev = hwif->pdev; chain->hwif = hwif; chain->chain_type = attr->chain_type; chain->num_cells = attr->num_cells; chain->cell_size = attr->cell_size; chain->prod_idx = 0; chain->cons_idx = 0; sema_init(&chain->sem, 1); chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells, sizeof(*chain->cell_ctxt), GFP_KERNEL); if (!chain->cell_ctxt) return -ENOMEM; chain->wb_status = dma_alloc_coherent(&pdev->dev, sizeof(*chain->wb_status), &chain->wb_status_paddr, GFP_KERNEL); if (!chain->wb_status) { dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); return -ENOMEM; } return 0; } /** * api_chain_free - free API CMD specific chain * @chain: the API CMD specific chain to free **/ static void api_chain_free(struct hinic_api_cmd_chain *chain) { struct hinic_hwif *hwif = chain->hwif; struct pci_dev *pdev = hwif->pdev; dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status), chain->wb_status, chain->wb_status_paddr); } /** * api_cmd_create_chain - create API CMD specific chain * @attr: attributes to set the chain * * Return the created chain **/ static struct hinic_api_cmd_chain * 
api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr) { struct hinic_hwif *hwif = attr->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_api_cmd_chain *chain; int err; if (attr->num_cells & (attr->num_cells - 1)) { dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n"); return ERR_PTR(-EINVAL); } chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); if (!chain) return ERR_PTR(-ENOMEM); err = api_chain_init(chain, attr); if (err) { dev_err(&pdev->dev, "Failed to initialize chain\n"); return ERR_PTR(err); } err = api_cmd_create_cells(chain); if (err) { dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n"); goto err_create_cells; } err = api_cmd_chain_hw_init(chain); if (err) { dev_err(&pdev->dev, "Failed to initialize chain HW\n"); goto err_chain_hw_init; } return chain; err_chain_hw_init: api_cmd_destroy_cells(chain, chain->num_cells); err_create_cells: api_chain_free(chain); return ERR_PTR(err); } /** * api_cmd_destroy_chain - destroy API CMD specific chain * @chain: the API CMD specific chain to destroy **/ static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) { api_cmd_chain_hw_clean(chain); api_cmd_destroy_cells(chain, chain->num_cells); api_chain_free(chain); } /** * hinic_api_cmd_init - Initialize all the API CMD chains * @chain: the API CMD chains that are initialized * @hwif: the hardware interface of a pci function device * * Return 0 - Success, negative - Failure **/ int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, struct hinic_hwif *hwif) { enum hinic_api_cmd_chain_type type, chain_type; struct hinic_api_cmd_chain_attr attr; struct pci_dev *pdev = hwif->pdev; size_t hw_cell_sz; int err; hw_cell_sz = sizeof(struct hinic_api_cmd_cell); attr.hwif = hwif; attr.num_cells = API_CHAIN_NUM_CELLS; attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz); chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { attr.chain_type = chain_type; if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) continue; chain[chain_type] = api_cmd_create_chain(&attr); if (IS_ERR(chain[chain_type])) { dev_err(&pdev->dev, "Failed to create chain %d\n", chain_type); err = PTR_ERR(chain[chain_type]); goto err_create_chain; } } return 0; err_create_chain: type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; for ( ; type < chain_type; type++) { if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) continue; api_cmd_destroy_chain(chain[type]); } return err; } /** * hinic_api_cmd_free - free the API CMD chains * @chain: the API CMD chains that are freed **/ void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain) { enum hinic_api_cmd_chain_type chain_type; chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) continue; api_cmd_destroy_chain(chain[chain_type]); } }
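/*
 * Editor's note: two small helpers from the hinic_hw_api_cmd.c code above,
 * re-created as a stand-alone illustration: the XOR checksum folded over the
 * first seven bytes of a descriptor (xor_chksum_set()) and the round-up of
 * the cell size to the next power of two (API_CMD_CELL_SIZE(), which the
 * driver builds on fls()). The demo_* names and the byte position chosen
 * for the checksum are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

/* XOR-fold the low seven bytes of the descriptor. */
static uint8_t demo_xor_chksum(uint64_t desc)
{
	uint8_t sum = 0;

	for (int i = 0; i < 7; i++)
		sum ^= (uint8_t)(desc >> (8 * i));
	return sum;
}

/* Round v up to the next power of two, with a floor of min_size. */
static unsigned int demo_roundup_pow2(unsigned int v, unsigned int min_size)
{
	unsigned int p = min_size;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t desc = 0x0011223344556600ULL;

	desc |= (uint64_t)demo_xor_chksum(desc) << 56;  /* top byte in this demo */
	printf("desc with checksum: 0x%016llx\n", (unsigned long long)desc);
	printf("cell size for 72 bytes: %u\n", demo_roundup_pow2(72, 64));
	return 0;
}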
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
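/*
 * Editor's note: the hinic_hw_dev.c code that follows sizes its MSI-X budget
 * as 2 * max_qps + num_aeqs + num_ceqs and, in the other direction, derives
 * the usable queue-pair count from the interrupts the function actually has
 * (parse_capability()/init_msix()). A stand-alone sketch of that arithmetic;
 * the demo_* names and the sample numbers are hypothetical.
 */
#include <stdio.h>

/* Interrupt vectors needed for a given configuration. */
static int demo_irqs_needed(int max_qps, int num_aeqs, int num_ceqs)
{
	/* each queue pair uses one SQ vector and one RQ vector */
	return 2 * max_qps + num_aeqs + num_ceqs;
}

/* Queue pairs that fit in an available interrupt budget. */
static int demo_qps_that_fit(int num_irqs, int num_aeqs, int num_ceqs)
{
	return (num_irqs - (num_aeqs + num_ceqs)) / 2;
}

int main(void)
{
	int aeqs = 4, ceqs = 4;

	printf("16 QPs need %d vectors\n", demo_irqs_needed(16, aeqs, ceqs));
	printf("32 vectors support %d QPs\n",
	       demo_qps_that_fit(32, aeqs, ceqs));
	return 0;
}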
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/log2.h> #include <linux/err.h> #include <linux/netdevice.h> #include <net/devlink.h> #include "hinic_devlink.h" #include "hinic_sriov.h" #include "hinic_dev.h" #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_mgmt.h" #include "hinic_hw_qp_ctxt.h" #include "hinic_hw_qp.h" #include "hinic_hw_io.h" #include "hinic_hw_dev.h" #define OUTBOUND_STATE_TIMEOUT 100 #define DB_STATE_TIMEOUT 100 #define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \ (2 * (max_qps) + (num_aeqs) + (num_ceqs)) #define ADDR_IN_4BYTES(addr) ((addr) >> 2) enum intr_type { INTR_MSIX_TYPE, }; /** * parse_capability - convert device capabilities to NIC capabilities * @hwdev: the HW device to set and convert device capabilities for * @dev_cap: device capabilities from FW * * Return 0 - Success, negative - Failure **/ static int parse_capability(struct hinic_hwdev *hwdev, struct hinic_dev_cap *dev_cap) { struct hinic_cap *nic_cap = &hwdev->nic_cap; int num_aeqs, num_ceqs, num_irqs; if (!HINIC_IS_VF(hwdev->hwif) && dev_cap->intr_type != INTR_MSIX_TYPE) return -EFAULT; num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif); num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif); /* Each QP has its own (SQ + RQ) interrupts */ nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2; if (nic_cap->num_qps > HINIC_Q_CTXT_MAX) nic_cap->num_qps = HINIC_Q_CTXT_MAX; if (!HINIC_IS_VF(hwdev->hwif)) nic_cap->max_qps = dev_cap->max_sqs + 1; else nic_cap->max_qps = dev_cap->max_sqs; if (nic_cap->num_qps > nic_cap->max_qps) nic_cap->num_qps = nic_cap->max_qps; if (!HINIC_IS_VF(hwdev->hwif)) { nic_cap->max_vf = dev_cap->max_vf; nic_cap->max_vf_qps = dev_cap->max_vf_sqs + 1; } hwdev->port_id = dev_cap->port_id; return 0; } /** * get_capability - get device capabilities from FW * @pfhwdev: the PF HW device to get capabilities for * * Return 0 - Success, negative - Failure **/ static int get_capability(struct hinic_pfhwdev *pfhwdev) { struct hinic_hwdev *hwdev = &pfhwdev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_dev_cap dev_cap; u16 out_len; int err; out_len = sizeof(dev_cap); err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM, HINIC_CFG_NIC_CAP, &dev_cap, sizeof(dev_cap), &dev_cap, &out_len, HINIC_MGMT_MSG_SYNC); if (err) { dev_err(&pdev->dev, "Failed to get capability from FW\n"); return err; } return parse_capability(hwdev, &dev_cap); } /** * get_dev_cap - get device capabilities * @hwdev: the NIC HW device to get capabilities for * * Return 0 - Success, negative - Failure **/ static int get_dev_cap(struct hinic_hwdev *hwdev) { struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_pfhwdev *pfhwdev; int err; switch (HINIC_FUNC_TYPE(hwif)) { case HINIC_PPF: case HINIC_PF: case HINIC_VF: pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); err = get_capability(pfhwdev); if (err) { dev_err(&pdev->dev, "Failed to get capability\n"); return err; } break; default: dev_err(&pdev->dev, "Unsupported PCI Function type\n"); return -EINVAL; } return 0; } /** * init_msix - enable the msix and save the entries * @hwdev: the NIC HW device * * Return 0 
- Success, negative - Failure **/ static int init_msix(struct hinic_hwdev *hwdev) { struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; int nr_irqs, num_aeqs, num_ceqs; int i, err; num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs); if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif)) nr_irqs = HINIC_HWIF_NUM_IRQS(hwif); hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs, sizeof(*hwdev->msix_entries), GFP_KERNEL); if (!hwdev->msix_entries) return -ENOMEM; for (i = 0; i < nr_irqs; i++) hwdev->msix_entries[i].entry = i; err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs); if (err) { dev_err(&pdev->dev, "Failed to enable pci msix\n"); return err; } return 0; } /** * disable_msix - disable the msix * @hwdev: the NIC HW device **/ static void disable_msix(struct hinic_hwdev *hwdev) { struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; pci_disable_msix(pdev); } /** * hinic_port_msg_cmd - send port msg to mgmt * @hwdev: the NIC HW device * @cmd: the port command * @buf_in: input buffer * @in_size: input size * @buf_out: output buffer * @out_size: returned output size * * Return 0 - Success, negative - Failure **/ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_pfhwdev *pfhwdev; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd, buf_in, in_size, buf_out, out_size, HINIC_MGMT_MSG_SYNC); } int hinic_hilink_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_hilink_cmd cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_pfhwdev *pfhwdev; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_HILINK, cmd, buf_in, in_size, buf_out, out_size, HINIC_MGMT_MSG_SYNC); } /** * init_fw_ctxt- Init Firmware tables before network mgmt and io operations * @hwdev: the NIC HW device * * Return 0 - Success, negative - Failure **/ static int init_fw_ctxt(struct hinic_hwdev *hwdev) { struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_cmd_fw_ctxt fw_ctxt; u16 out_size = sizeof(fw_ctxt); int err; fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT, &fw_ctxt, sizeof(fw_ctxt), &fw_ctxt, &out_size); if (err || out_size != sizeof(fw_ctxt) || fw_ctxt.status) { dev_err(&pdev->dev, "Failed to init FW ctxt, err: %d, status: 0x%x, out size: 0x%x\n", err, fw_ctxt.status, out_size); return -EIO; } return 0; } /** * set_hw_ioctxt - set the shape of the IO queues in FW * @hwdev: the NIC HW device * @rq_depth: rq depth * @sq_depth: sq depth * * Return 0 - Success, negative - Failure **/ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth, unsigned int rq_depth) { struct hinic_hwif *hwif = hwdev->hwif; struct hinic_cmd_hw_ioctxt hw_ioctxt; struct hinic_pfhwdev *pfhwdev; hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif); hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT; hw_ioctxt.cmdq_depth = 0; hw_ioctxt.lro_en = 1; hw_ioctxt.rq_depth = ilog2(rq_depth); hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX; hw_ioctxt.sq_depth = ilog2(sq_depth); pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, 
HINIC_COMM_CMD_HWCTXT_SET, &hw_ioctxt, sizeof(hw_ioctxt), NULL, NULL, HINIC_MGMT_MSG_SYNC); } static int wait_for_outbound_state(struct hinic_hwdev *hwdev) { enum hinic_outbound_state outbound_state; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; unsigned long end; end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT); do { outbound_state = hinic_outbound_state_get(hwif); if (outbound_state == HINIC_OUTBOUND_ENABLE) return 0; msleep(20); } while (time_before(jiffies, end)); dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n"); return -EFAULT; } static int wait_for_db_state(struct hinic_hwdev *hwdev) { struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; enum hinic_db_state db_state; unsigned long end; end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT); do { db_state = hinic_db_state_get(hwif); if (db_state == HINIC_DB_ENABLE) return 0; msleep(20); } while (time_before(jiffies, end)); dev_err(&pdev->dev, "Wait for DB - Timeout\n"); return -EFAULT; } /** * clear_io_resources - set the IO resources as not active in the NIC * @hwdev: the NIC HW device * * Return 0 - Success, negative - Failure **/ static int clear_io_resources(struct hinic_hwdev *hwdev) { struct hinic_cmd_clear_io_res cmd_clear_io_res; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_pfhwdev *pfhwdev; int err; /* sleep 100ms to wait for firmware stopping I/O */ msleep(100); cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res, sizeof(cmd_clear_io_res), NULL, NULL, HINIC_MGMT_MSG_SYNC); if (err) { dev_err(&pdev->dev, "Failed to clear IO resources\n"); return err; } return 0; } /** * set_resources_state - set the state of the resources in the NIC * @hwdev: the NIC HW device * @state: the state to set * * Return 0 - Success, negative - Failure **/ static int set_resources_state(struct hinic_hwdev *hwdev, enum hinic_res_state state) { struct hinic_cmd_set_res_state res_state; struct hinic_hwif *hwif = hwdev->hwif; struct hinic_pfhwdev *pfhwdev; res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); res_state.state = state; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_RES_STATE_SET, &res_state, sizeof(res_state), NULL, NULL, HINIC_MGMT_MSG_SYNC); } /** * get_base_qpn - get the first qp number * @hwdev: the NIC HW device * @base_qpn: returned qp number * * Return 0 - Success, negative - Failure **/ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn) { struct hinic_cmd_base_qpn cmd_base_qpn; struct hinic_hwif *hwif = hwdev->hwif; u16 out_size = sizeof(cmd_base_qpn); struct pci_dev *pdev = hwif->pdev; int err; cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN, &cmd_base_qpn, sizeof(cmd_base_qpn), &cmd_base_qpn, &out_size); if (err || out_size != sizeof(cmd_base_qpn) || cmd_base_qpn.status) { dev_err(&pdev->dev, "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n", err, cmd_base_qpn.status, out_size); return -EIO; } *base_qpn = cmd_base_qpn.qpn; return 0; } /** * hinic_hwdev_ifup - Preparing the HW for passing IO * @hwdev: the NIC HW device * @sq_depth: the send queue depth * @rq_depth: the receive queue depth * * Return 0 - Success, negative - Failure **/ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 
sq_depth, u16 rq_depth) { struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; struct hinic_cap *nic_cap = &hwdev->nic_cap; struct hinic_hwif *hwif = hwdev->hwif; int err, num_aeqs, num_ceqs, num_qps; struct msix_entry *ceq_msix_entries; struct msix_entry *sq_msix_entries; struct msix_entry *rq_msix_entries; struct pci_dev *pdev = hwif->pdev; u16 base_qpn; err = get_base_qpn(hwdev, &base_qpn); if (err) { dev_err(&pdev->dev, "Failed to get global base qp number\n"); return err; } num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); ceq_msix_entries = &hwdev->msix_entries[num_aeqs]; func_to_io->hwdev = hwdev; func_to_io->sq_depth = sq_depth; func_to_io->rq_depth = rq_depth; func_to_io->global_qpn = base_qpn; err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs, ceq_msix_entries); if (err) { dev_err(&pdev->dev, "Failed to init IO channel\n"); return err; } num_qps = nic_cap->num_qps; sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs]; rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps]; err = hinic_io_create_qps(func_to_io, base_qpn, num_qps, sq_msix_entries, rq_msix_entries); if (err) { dev_err(&pdev->dev, "Failed to create QPs\n"); goto err_create_qps; } err = wait_for_db_state(hwdev); if (err) { dev_warn(&pdev->dev, "db - disabled, try again\n"); hinic_db_state_set(hwif, HINIC_DB_ENABLE); } err = set_hw_ioctxt(hwdev, sq_depth, rq_depth); if (err) { dev_err(&pdev->dev, "Failed to set HW IO ctxt\n"); goto err_hw_ioctxt; } return 0; err_hw_ioctxt: hinic_io_destroy_qps(func_to_io, num_qps); err_create_qps: hinic_io_free(func_to_io); return err; } /** * hinic_hwdev_ifdown - Closing the HW for passing IO * @hwdev: the NIC HW device * **/ void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev) { struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; struct hinic_cap *nic_cap = &hwdev->nic_cap; clear_io_resources(hwdev); hinic_io_destroy_qps(func_to_io, nic_cap->num_qps); hinic_io_free(func_to_io); } /** * hinic_hwdev_cb_register - register callback handler for MGMT events * @hwdev: the NIC HW device * @cmd: the mgmt event * @handle: private data for the handler * @handler: event handler **/ void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, enum hinic_mgmt_msg_cmd cmd, void *handle, void (*handler)(void *handle, void *buf_in, u16 in_size, void *buf_out, u16 *out_size)) { struct hinic_pfhwdev *pfhwdev; struct hinic_nic_cb *nic_cb; u8 cmd_cb; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; nic_cb = &pfhwdev->nic_cb[cmd_cb]; nic_cb->handler = handler; nic_cb->handle = handle; nic_cb->cb_state = HINIC_CB_ENABLED; } /** * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events * @hwdev: the NIC HW device * @cmd: the mgmt event **/ void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev, enum hinic_mgmt_msg_cmd cmd) { struct hinic_hwif *hwif = hwdev->hwif; struct hinic_pfhwdev *pfhwdev; struct hinic_nic_cb *nic_cb; u8 cmd_cb; if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) return; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; nic_cb = &pfhwdev->nic_cb[cmd_cb]; nic_cb->cb_state &= ~HINIC_CB_ENABLED; while (nic_cb->cb_state & HINIC_CB_RUNNING) schedule(); nic_cb->handler = NULL; } /** * nic_mgmt_msg_handler - nic mgmt event handler * @handle: private data for the handler * @cmd: message command * @buf_in: input buffer * @in_size: input size * @buf_out: output buffer * @out_size: returned output size **/ static void 
nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_pfhwdev *pfhwdev = handle; enum hinic_cb_state cb_state; struct hinic_nic_cb *nic_cb; struct hinic_hwdev *hwdev; struct hinic_hwif *hwif; struct pci_dev *pdev; u8 cmd_cb; hwdev = &pfhwdev->hwdev; hwif = hwdev->hwif; pdev = hwif->pdev; if (cmd < HINIC_MGMT_MSG_CMD_BASE || cmd >= HINIC_MGMT_MSG_CMD_MAX) { dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd); return; } cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; nic_cb = &pfhwdev->nic_cb[cmd_cb]; cb_state = cmpxchg(&nic_cb->cb_state, HINIC_CB_ENABLED, HINIC_CB_ENABLED | HINIC_CB_RUNNING); if (cb_state == HINIC_CB_ENABLED && nic_cb->handler) nic_cb->handler(nic_cb->handle, buf_in, in_size, buf_out, out_size); else dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd); nic_cb->cb_state &= ~HINIC_CB_RUNNING; } static void hinic_comm_recv_mgmt_self_cmd_reg(struct hinic_pfhwdev *pfhwdev, u8 cmd, comm_mgmt_self_msg_proc proc) { u8 cmd_idx; cmd_idx = pfhwdev->proc.cmd_num; if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) { dev_err(&pfhwdev->hwdev.hwif->pdev->dev, "Register recv mgmt process failed, cmd: 0x%x\n", cmd); return; } pfhwdev->proc.info[cmd_idx].cmd = cmd; pfhwdev->proc.info[cmd_idx].proc = proc; pfhwdev->proc.cmd_num++; } static void hinic_comm_recv_mgmt_self_cmd_unreg(struct hinic_pfhwdev *pfhwdev, u8 cmd) { u8 cmd_idx; cmd_idx = pfhwdev->proc.cmd_num; if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) { dev_err(&pfhwdev->hwdev.hwif->pdev->dev, "Unregister recv mgmt process failed, cmd: 0x%x\n", cmd); return; } for (cmd_idx = 0; cmd_idx < HINIC_COMM_SELF_CMD_MAX; cmd_idx++) { if (cmd == pfhwdev->proc.info[cmd_idx].cmd) { pfhwdev->proc.info[cmd_idx].cmd = 0; pfhwdev->proc.info[cmd_idx].proc = NULL; pfhwdev->proc.cmd_num--; } } } static void comm_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_pfhwdev *pfhwdev = handle; u8 cmd_idx; for (cmd_idx = 0; cmd_idx < pfhwdev->proc.cmd_num; cmd_idx++) { if (cmd == pfhwdev->proc.info[cmd_idx].cmd) { if (!pfhwdev->proc.info[cmd_idx].proc) { dev_warn(&pfhwdev->hwdev.hwif->pdev->dev, "PF recv mgmt comm msg handle null, cmd: 0x%x\n", cmd); } else { pfhwdev->proc.info[cmd_idx].proc (&pfhwdev->hwdev, buf_in, in_size, buf_out, out_size); } return; } } dev_warn(&pfhwdev->hwdev.hwif->pdev->dev, "Received unknown mgmt cpu event: 0x%x\n", cmd); *out_size = 0; } /* pf fault report event */ static void pf_fault_event_handler(void *dev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_cmd_fault_event *fault_event = buf_in; struct hinic_hwdev *hwdev = dev; if (in_size != sizeof(*fault_event)) { dev_err(&hwdev->hwif->pdev->dev, "Invalid fault event report, length: %d, should be %zu\n", in_size, sizeof(*fault_event)); return; } if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->hw_fault_reporter)) return; devlink_health_report(hwdev->devlink_dev->hw_fault_reporter, "HW fatal error reported", &fault_event->event); } static void mgmt_watchdog_timeout_event_handler(void *dev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_mgmt_watchdog_info *watchdog_info = buf_in; struct hinic_hwdev *hwdev = dev; if (in_size != sizeof(*watchdog_info)) { dev_err(&hwdev->hwif->pdev->dev, "Invalid mgmt watchdog report, length: %d, should be %zu\n", in_size, sizeof(*watchdog_info)); return; } if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->fw_fault_reporter)) return; 
devlink_health_report(hwdev->devlink_dev->fw_fault_reporter, "FW fatal error reported", watchdog_info); } /** * init_pfhwdev - Initialize the extended components of PF * @pfhwdev: the HW device for PF * * Return 0 - success, negative - failure **/ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) { struct hinic_hwdev *hwdev = &pfhwdev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; int err; err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif); if (err) { dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n"); return err; } err = hinic_func_to_func_init(hwdev); if (err) { dev_err(&hwif->pdev->dev, "Failed to init mailbox\n"); hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); return err; } if (!HINIC_IS_VF(hwif)) { hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, pfhwdev, nic_mgmt_msg_handler); hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, pfhwdev, comm_mgmt_msg_handler); hinic_comm_recv_mgmt_self_cmd_reg(pfhwdev, HINIC_COMM_CMD_FAULT_REPORT, pf_fault_event_handler); hinic_comm_recv_mgmt_self_cmd_reg (pfhwdev, HINIC_COMM_CMD_WATCHDOG_INFO, mgmt_watchdog_timeout_event_handler); } else { hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC, nic_mgmt_msg_handler); } hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE); hinic_devlink_register(hwdev->devlink_dev); return 0; } /** * free_pfhwdev - Free the extended components of PF * @pfhwdev: the HW device for PF **/ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) { struct hinic_hwdev *hwdev = &pfhwdev->hwdev; hinic_devlink_unregister(hwdev->devlink_dev); hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT); if (!HINIC_IS_VF(hwdev->hwif)) { hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev, HINIC_COMM_CMD_WATCHDOG_INFO); hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev, HINIC_COMM_CMD_FAULT_REPORT); hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM); hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC); } else { hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC); } hinic_func_to_func_free(hwdev); hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); } static int hinic_l2nic_reset(struct hinic_hwdev *hwdev) { struct hinic_cmd_l2nic_reset l2nic_reset = {0}; u16 out_size = sizeof(l2nic_reset); struct hinic_pfhwdev *pfhwdev; int err; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); l2nic_reset.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); /* 0 represents standard l2nic reset flow */ l2nic_reset.reset_flag = 0; err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_L2NIC_RESET, &l2nic_reset, sizeof(l2nic_reset), &l2nic_reset, &out_size, HINIC_MGMT_MSG_SYNC); if (err || !out_size || l2nic_reset.status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to reset L2NIC resources, err: %d, status: 0x%x, out_size: 0x%x\n", err, l2nic_reset.status, out_size); return -EIO; } return 0; } static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev, struct hinic_msix_config *interrupt_info) { u16 out_size = sizeof(*interrupt_info); struct hinic_pfhwdev *pfhwdev; int err; if (!hwdev || !interrupt_info) return -EINVAL; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, interrupt_info, sizeof(*interrupt_info), interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC); if (err || !out_size || interrupt_info->status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to get interrupt config, err: %d, 
status: 0x%x, out size: 0x%x\n", err, interrupt_info->status, out_size); return -EIO; } return 0; } int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, struct hinic_msix_config *interrupt_info) { u16 out_size = sizeof(*interrupt_info); struct hinic_msix_config temp_info; struct hinic_pfhwdev *pfhwdev; int err; if (!hwdev) return -EINVAL; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); err = hinic_get_interrupt_cfg(hwdev, &temp_info); if (err) return -EINVAL; interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt; interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt; err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, interrupt_info, sizeof(*interrupt_info), interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC); if (err || !out_size || interrupt_info->status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n", err, interrupt_info->status, out_size); return -EIO; } return 0; } /** * hinic_init_hwdev - Initialize the NIC HW * @pdev: the NIC pci device * @devlink: the poniter of hinic devlink * * Return initialized NIC HW device * * Initialize the NIC HW device and return a pointer to it **/ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devlink) { struct hinic_pfhwdev *pfhwdev; struct hinic_hwdev *hwdev; struct hinic_hwif *hwif; int err, num_aeqs; hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL); if (!hwif) return ERR_PTR(-ENOMEM); err = hinic_init_hwif(hwif, pdev); if (err) { dev_err(&pdev->dev, "Failed to init HW interface\n"); return ERR_PTR(err); } pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL); if (!pfhwdev) { err = -ENOMEM; goto err_pfhwdev_alloc; } hwdev = &pfhwdev->hwdev; hwdev->hwif = hwif; hwdev->devlink_dev = devlink_priv(devlink); hwdev->devlink_dev->hwdev = hwdev; err = init_msix(hwdev); if (err) { dev_err(&pdev->dev, "Failed to init msix\n"); goto err_init_msix; } err = wait_for_outbound_state(hwdev); if (err) { dev_warn(&pdev->dev, "outbound - disabled, try again\n"); hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE); } num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs, HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE, hwdev->msix_entries); if (err) { dev_err(&pdev->dev, "Failed to init async event queues\n"); goto err_aeqs_init; } err = init_pfhwdev(pfhwdev); if (err) { dev_err(&pdev->dev, "Failed to init PF HW device\n"); goto err_init_pfhwdev; } err = hinic_l2nic_reset(hwdev); if (err) goto err_l2nic_reset; err = get_dev_cap(hwdev); if (err) { dev_err(&pdev->dev, "Failed to get device capabilities\n"); goto err_dev_cap; } mutex_init(&hwdev->func_to_io.nic_cfg.cfg_mutex); err = hinic_vf_func_init(hwdev); if (err) { dev_err(&pdev->dev, "Failed to init nic mbox\n"); goto err_vf_func_init; } err = init_fw_ctxt(hwdev); if (err) { dev_err(&pdev->dev, "Failed to init function table\n"); goto err_init_fw_ctxt; } err = set_resources_state(hwdev, HINIC_RES_ACTIVE); if (err) { dev_err(&pdev->dev, "Failed to set resources state\n"); goto err_resources_state; } return hwdev; err_resources_state: err_init_fw_ctxt: hinic_vf_func_free(hwdev); err_vf_func_init: err_l2nic_reset: err_dev_cap: free_pfhwdev(pfhwdev); err_init_pfhwdev: hinic_aeqs_free(&hwdev->aeqs); err_aeqs_init: disable_msix(hwdev); err_init_msix: err_pfhwdev_alloc: hinic_free_hwif(hwif); if (err > 0) err = -EIO; return ERR_PTR(err); } /** * hinic_free_hwdev - 
Free the NIC HW device * @hwdev: the NIC HW device **/ void hinic_free_hwdev(struct hinic_hwdev *hwdev) { struct hinic_pfhwdev *pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); set_resources_state(hwdev, HINIC_RES_CLEAN); hinic_vf_func_free(hwdev); free_pfhwdev(pfhwdev); hinic_aeqs_free(&hwdev->aeqs); disable_msix(hwdev); hinic_free_hwif(hwdev->hwif); } /** * hinic_hwdev_num_qps - return the number QPs available for use * @hwdev: the NIC HW device * * Return number QPs available for use **/ int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev) { struct hinic_cap *nic_cap = &hwdev->nic_cap; return nic_cap->num_qps; } /** * hinic_hwdev_get_sq - get SQ * @hwdev: the NIC HW device * @i: the position of the SQ * * Return: the SQ in the i position **/ struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i) { struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; struct hinic_qp *qp = &func_to_io->qps[i]; if (i >= hinic_hwdev_num_qps(hwdev)) return NULL; return &qp->sq; } /** * hinic_hwdev_get_rq - get RQ * @hwdev: the NIC HW device * @i: the position of the RQ * * Return: the RQ in the i position **/ struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i) { struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; struct hinic_qp *qp = &func_to_io->qps[i]; if (i >= hinic_hwdev_num_qps(hwdev)) return NULL; return &qp->rq; } /** * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry * @hwdev: the NIC HW device * @msix_index: msix_index * * Return 0 - Success, negative - Failure **/ int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index) { return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index); } /** * hinic_hwdev_msix_set - set message attribute for msix entry * @hwdev: the NIC HW device * @msix_index: msix_index * @pending_limit: the maximum pending interrupt events (unit 8) * @coalesc_timer: coalesc period for interrupt (unit 8 us) * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us) * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) * @resend_timer: maximum wait for resending msix (unit coalesc period) * * Return 0 - Success, negative - Failure **/ int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, u8 pending_limit, u8 coalesc_timer, u8 lli_timer_cfg, u8 lli_credit_limit, u8 resend_timer) { return hinic_msix_attr_set(hwdev->hwif, msix_index, pending_limit, coalesc_timer, lli_timer_cfg, lli_credit_limit, resend_timer); } /** * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq * @hwdev: the NIC HW device * @sq: send queue * @pending_limit: the maximum pending update ci events (unit 8) * @coalesc_timer: coalesc period for update ci (unit 8 us) * * Return 0 - Success, negative - Failure **/ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, u8 pending_limit, u8 coalesc_timer) { struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); struct hinic_hwif *hwif = hwdev->hwif; struct hinic_pfhwdev *pfhwdev; struct hinic_cmd_hw_ci hw_ci; hw_ci.dma_attr_off = 0; hw_ci.pending_limit = pending_limit; hw_ci.coalesc_timer = coalesc_timer; hw_ci.msix_en = 1; hw_ci.msix_entry_idx = sq->msix_entry; hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif); hw_ci.sq_id = qp->q_id; hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr); pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_SQ_HI_CI_SET, &hw_ci, sizeof(hw_ci), NULL, NULL, 
HINIC_MGMT_MSG_SYNC); } /** * hinic_hwdev_set_msix_state- set msix state * @hwdev: the NIC HW device * @msix_index: IRQ corresponding index number * @flag: msix state * **/ void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index, enum hinic_msix_state flag) { hinic_set_msix_state(hwdev->hwif, msix_index, flag); } int hinic_get_board_info(struct hinic_hwdev *hwdev, struct hinic_comm_board_info *board_info) { u16 out_size = sizeof(*board_info); struct hinic_pfhwdev *pfhwdev; int err; if (!hwdev || !board_info) return -EINVAL; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_GET_BOARD_INFO, board_info, sizeof(*board_info), board_info, &out_size, HINIC_MGMT_MSG_SYNC); if (err || board_info->status || !out_size) { dev_err(&hwdev->hwif->pdev->dev, "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n", err, board_info->status, out_size); return -EIO; } return 0; }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
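/*
 * Illustrative sketch only, not part of hinic_hw_dev.c. It mirrors the
 * ENABLED/RUNNING state-bit pattern that nic_mgmt_msg_handler() and
 * hinic_hwdev_cb_unregister() above use around the L2NIC callback table,
 * reworked with plain C11 atomics so it can be compiled and run in user
 * space. All names here (demo_cb, CB_ENABLED, demo_dispatch, ...) are
 * invented for the example and do not exist in the driver.
 */
#include <stdatomic.h>
#include <stdio.h>

#define CB_ENABLED 0x1
#define CB_RUNNING 0x2

struct demo_cb {
	atomic_int state;
	void (*handler)(void *ctx);
	void *ctx;
};

/* Dispatch path: only call the handler while it is still registered. */
static void demo_dispatch(struct demo_cb *cb)
{
	int expected = CB_ENABLED;

	/* Atomically move ENABLED -> ENABLED|RUNNING, like the cmpxchg() above. */
	if (atomic_compare_exchange_strong(&cb->state, &expected,
					   CB_ENABLED | CB_RUNNING) &&
	    cb->handler)
		cb->handler(cb->ctx);
	else
		printf("event dropped: no enabled handler\n");

	/* Clear RUNNING so an unregister waiting on it can make progress. */
	atomic_fetch_and(&cb->state, ~CB_RUNNING);
}

/* Unregister path: drop ENABLED, then wait out any in-flight dispatch. */
static void demo_unregister(struct demo_cb *cb)
{
	atomic_fetch_and(&cb->state, ~CB_ENABLED);
	while (atomic_load(&cb->state) & CB_RUNNING)
		;	/* the driver yields via schedule() instead of spinning */
	cb->handler = NULL;
}

static void say_hello(void *ctx)
{
	printf("handler ran with ctx=%p\n", ctx);
}

int main(void)
{
	struct demo_cb cb = { .handler = say_hello, .ctx = NULL };

	atomic_store(&cb.state, CB_ENABLED);
	demo_dispatch(&cb);	/* handler runs */
	demo_unregister(&cb);
	demo_dispatch(&cb);	/* event is dropped after unregister */
	return 0;
}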
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/pci.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/types.h> #include <linux/bitops.h> #include <linux/delay.h> #include "hinic_hw_csr.h" #include "hinic_hw_if.h" #define PCIE_ATTR_ENTRY 0 #define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs) #define WAIT_HWIF_READY_TIMEOUT 10000 #define HINIC_SELFTEST_RESULT 0x883C /** * hinic_msix_attr_set - set message attribute for msix entry * @hwif: the HW interface of a pci function device * @msix_index: msix_index * @pending_limit: the maximum pending interrupt events (unit 8) * @coalesc_timer: coalesc period for interrupt (unit 8 us) * @lli_timer: replenishing period for low latency credit (unit 8 us) * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) * @resend_timer: maximum wait for resending msix (unit coalesc period) * * Return 0 - Success, negative - Failure **/ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, u8 pending_limit, u8 coalesc_timer, u8 lli_timer, u8 lli_credit_limit, u8 resend_timer) { u32 msix_ctrl, addr; if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) return -EINVAL; msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) | HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) | HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) | HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) | HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER); addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); hinic_hwif_write_reg(hwif, addr, msix_ctrl); return 0; } /** * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry * @hwif: the HW interface of a pci function device * @msix_index: msix_index * * Return 0 - Success, negative - Failure **/ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index) { u32 msix_ctrl, addr; if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) return -EINVAL; msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER); addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index); hinic_hwif_write_reg(hwif, addr, msix_ctrl); return 0; } /** * hinic_set_pf_action - set action on pf channel * @hwif: the HW interface of a pci function device * @action: action on pf channel **/ void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action) { u32 attr5; if (HINIC_IS_VF(hwif)) return; attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR); attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION); attr5 |= HINIC_FA5_SET(action, PF_ACTION); hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5); } enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif) { u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); return HINIC_FA4_GET(attr4, OUTBOUND_STATE); } void hinic_outbound_state_set(struct hinic_hwif *hwif, enum hinic_outbound_state outbound_state) { u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE); attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE); hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); } enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif) { u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); return HINIC_FA4_GET(attr4, DB_STATE); } void hinic_db_state_set(struct hinic_hwif *hwif, enum hinic_db_state db_state) { u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE); attr4 |= 
HINIC_FA4_SET(db_state, DB_STATE); hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); } void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx, enum hinic_msix_state flag) { u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE + HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL; u32 mask_bits; mask_bits = readl(hwif->intr_regs_base + offset); mask_bits &= ~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; if (flag) mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; writel(mask_bits, hwif->intr_regs_base + offset); } /** * hwif_ready - test if the HW is ready for use * @hwif: the HW interface of a pci function device * * Return 0 - Success, negative - Failure **/ static int hwif_ready(struct hinic_hwif *hwif) { u32 addr, attr1; addr = HINIC_CSR_FUNC_ATTR1_ADDR; attr1 = hinic_hwif_read_reg(hwif, addr); if (!HINIC_FA1_GET(attr1, MGMT_INIT_STATUS)) return -EBUSY; if (HINIC_IS_VF(hwif)) { if (!HINIC_FA1_GET(attr1, PF_INIT_STATUS)) return -EBUSY; } return 0; } static int wait_hwif_ready(struct hinic_hwif *hwif) { unsigned long timeout = 0; do { if (!hwif_ready(hwif)) return 0; usleep_range(999, 1000); timeout++; } while (timeout <= WAIT_HWIF_READY_TIMEOUT); dev_err(&hwif->pdev->dev, "Wait for hwif timeout\n"); return -EBUSY; } /** * set_hwif_attr - set the attributes in the relevant members in hwif * @hwif: the HW interface of a pci function device * @attr0: the first attribute that was read from the hw * @attr1: the second attribute that was read from the hw * @attr2: the third attribute that was read from the hw **/ static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1, u32 attr2) { hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX); hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX); hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX); hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE); hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC)); hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC)); hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC)); hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC)); hwif->attr.global_vf_id_of_pf = HINIC_FA2_GET(attr2, GLOBAL_VF_ID_OF_PF); } /** * read_hwif_attr - read the attributes and set members in hwif * @hwif: the HW interface of a pci function device **/ static void read_hwif_attr(struct hinic_hwif *hwif) { u32 addr, attr0, attr1, attr2; addr = HINIC_CSR_FUNC_ATTR0_ADDR; attr0 = hinic_hwif_read_reg(hwif, addr); addr = HINIC_CSR_FUNC_ATTR1_ADDR; attr1 = hinic_hwif_read_reg(hwif, addr); addr = HINIC_CSR_FUNC_ATTR2_ADDR; attr2 = hinic_hwif_read_reg(hwif, addr); set_hwif_attr(hwif, attr0, attr1, attr2); } /** * set_ppf - try to set hwif as ppf and set the type of hwif in this case * @hwif: the HW interface of a pci function device **/ static void set_ppf(struct hinic_hwif *hwif) { struct hinic_func_attr *attr = &hwif->attr; u32 addr, val, ppf_election; /* Read Modify Write */ addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif)); val = hinic_hwif_read_reg(hwif, addr); val = HINIC_PPF_ELECTION_CLEAR(val, IDX); ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX); val |= ppf_election; hinic_hwif_write_reg(hwif, addr, val); /* check PPF */ val = hinic_hwif_read_reg(hwif, addr); attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX); if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif)) attr->func_type = HINIC_PPF; } /** * set_dma_attr - set the dma attributes in the HW * @hwif: the HW interface of a pci function device * @entry_idx: the entry index in the dma table * @st: PCIE TLP 
steering tag * @at: PCIE TLP AT field * @ph: PCIE TLP Processing Hint field * @no_snooping: PCIE TLP No snooping * @tph_en: PCIE TLP Processing Hint Enable **/ static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx, u8 st, u8 at, u8 ph, enum hinic_pcie_nosnoop no_snooping, enum hinic_pcie_tph tph_en) { u32 addr, val, dma_attr_entry; /* Read Modify Write */ addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx); val = hinic_hwif_read_reg(hwif, addr); val = HINIC_DMA_ATTR_CLEAR(val, ST) & HINIC_DMA_ATTR_CLEAR(val, AT) & HINIC_DMA_ATTR_CLEAR(val, PH) & HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) & HINIC_DMA_ATTR_CLEAR(val, TPH_EN); dma_attr_entry = HINIC_DMA_ATTR_SET(st, ST) | HINIC_DMA_ATTR_SET(at, AT) | HINIC_DMA_ATTR_SET(ph, PH) | HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) | HINIC_DMA_ATTR_SET(tph_en, TPH_EN); val |= dma_attr_entry; hinic_hwif_write_reg(hwif, addr, val); } /** * dma_attr_init - initialize the default dma attributes * @hwif: the HW interface of a pci function device **/ static void dma_attr_init(struct hinic_hwif *hwif) { set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE, HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE, HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE); } u16 hinic_glb_pf_vf_offset(struct hinic_hwif *hwif) { if (!hwif) return 0; return hwif->attr.global_vf_id_of_pf; } u16 hinic_global_func_id_hw(struct hinic_hwif *hwif) { u32 addr, attr0; addr = HINIC_CSR_FUNC_ATTR0_ADDR; attr0 = hinic_hwif_read_reg(hwif, addr); return HINIC_FA0_GET(attr0, FUNC_IDX); } u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif) { u32 addr, attr0; addr = HINIC_CSR_FUNC_ATTR0_ADDR; attr0 = hinic_hwif_read_reg(hwif, addr); return HINIC_FA0_GET(attr0, PF_IDX); } static void __print_selftest_reg(struct hinic_hwif *hwif) { u32 addr, attr0, attr1; addr = HINIC_CSR_FUNC_ATTR1_ADDR; attr1 = hinic_hwif_read_reg(hwif, addr); if (attr1 == HINIC_PCIE_LINK_DOWN) { dev_err(&hwif->pdev->dev, "PCIE is link down\n"); return; } addr = HINIC_CSR_FUNC_ATTR0_ADDR; attr0 = hinic_hwif_read_reg(hwif, addr); if (HINIC_FA0_GET(attr0, FUNC_TYPE) != HINIC_VF && !HINIC_FA0_GET(attr0, PCI_INTF_IDX)) dev_err(&hwif->pdev->dev, "Selftest reg: 0x%08x\n", hinic_hwif_read_reg(hwif, HINIC_SELFTEST_RESULT)); } /** * hinic_init_hwif - initialize the hw interface * @hwif: the HW interface of a pci function device * @pdev: the pci device for accessing PCI resources * * Return 0 - Success, negative - Failure **/ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev) { int err; hwif->pdev = pdev; hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR); if (!hwif->cfg_regs_bar) { dev_err(&pdev->dev, "Failed to map configuration regs\n"); return -ENOMEM; } hwif->intr_regs_base = pci_ioremap_bar(pdev, HINIC_PCI_INTR_REGS_BAR); if (!hwif->intr_regs_base) { dev_err(&pdev->dev, "Failed to map configuration regs\n"); err = -ENOMEM; goto err_map_intr_bar; } err = wait_hwif_ready(hwif); if (err) { dev_err(&pdev->dev, "HW interface is not ready\n"); __print_selftest_reg(hwif); goto err_hwif_ready; } read_hwif_attr(hwif); if (HINIC_IS_PF(hwif)) set_ppf(hwif); /* No transactionss before DMA is initialized */ dma_attr_init(hwif); return 0; err_hwif_ready: iounmap(hwif->intr_regs_base); err_map_intr_bar: iounmap(hwif->cfg_regs_bar); return err; } /** * hinic_free_hwif - free the HW interface * @hwif: the HW interface of a pci function device **/ void hinic_free_hwif(struct hinic_hwif *hwif) { iounmap(hwif->intr_regs_base); iounmap(hwif->cfg_regs_bar); }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
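/*
 * Illustrative sketch only, not taken from hinic_hw_if.c. It shows the
 * generic clear-then-set read-modify-write idiom that hinic_db_state_set()
 * and hinic_outbound_state_set() above apply to the ATTR4 CSR, using
 * made-up field macros over a plain uint32_t instead of a mapped register.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical 2-bit field starting at bit 8 of a 32-bit attribute word. */
#define DEMO_DB_STATE_SHIFT	8
#define DEMO_DB_STATE_MASK	0x3u

#define DEMO_FIELD_CLEAR(val)	((val) & ~(DEMO_DB_STATE_MASK << DEMO_DB_STATE_SHIFT))
#define DEMO_FIELD_SET(state)	(((uint32_t)(state) & DEMO_DB_STATE_MASK) << DEMO_DB_STATE_SHIFT)
#define DEMO_FIELD_GET(val)	(((val) >> DEMO_DB_STATE_SHIFT) & DEMO_DB_STATE_MASK)

enum demo_db_state { DEMO_DB_DISABLE = 0, DEMO_DB_ENABLE = 1 };

/* Stand-in for hinic_hwif_read_reg()/hinic_hwif_write_reg(). */
static uint32_t demo_attr4 = 0xA5000000u;

static void demo_db_state_set(enum demo_db_state state)
{
	uint32_t attr4 = demo_attr4;		/* read                    */

	attr4 = DEMO_FIELD_CLEAR(attr4);	/* clear the target field  */
	attr4 |= DEMO_FIELD_SET(state);		/* or in the new value     */
	demo_attr4 = attr4;			/* write back              */
}

int main(void)
{
	demo_db_state_set(DEMO_DB_ENABLE);
	printf("attr4 = 0x%08x, db_state = %u\n",
	       demo_attr4, DEMO_FIELD_GET(demo_attr4));
	return 0;
}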
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <asm/byteorder.h> #include "hinic_common.h" /** * hinic_cpu_to_be32 - convert data to big endian 32 bit format * @data: the data to convert * @len: length of data to convert **/ void hinic_cpu_to_be32(void *data, int len) { u32 *mem = data; int i; len = len / sizeof(u32); for (i = 0; i < len; i++) { *mem = cpu_to_be32(*mem); mem++; } } /** * hinic_be32_to_cpu - convert data from big endian 32 bit format * @data: the data to convert * @len: length of data to convert **/ void hinic_be32_to_cpu(void *data, int len) { u32 *mem = data; int i; len = len / sizeof(u32); for (i = 0; i < len; i++) { *mem = be32_to_cpu(*mem); mem++; } } /** * hinic_set_sge - set dma area in scatter gather entry * @sge: scatter gather entry * @addr: dma address * @len: length of relevant data in the dma address **/ void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len) { sge->hi_addr = upper_32_bits(addr); sge->lo_addr = lower_32_bits(addr); sge->len = len; } /** * hinic_sge_to_dma - get dma address from scatter gather entry * @sge: scatter gather entry * * Return dma address of sg entry **/ dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge) { return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr); }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_common.c
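/*
 * Illustrative sketch only: a user-space rework of the hi/lo address split
 * performed by hinic_set_sge() and hinic_sge_to_dma() in hinic_common.c
 * above, with a plain uint64_t standing in for dma_addr_t and hand-rolled
 * upper/lower 32-bit helpers. The demo_* names are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct demo_sge {
	uint32_t hi_addr;
	uint32_t lo_addr;
	uint32_t len;
};

static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }

/* Split a 64-bit address into the two 32-bit fields of the SGE. */
static void demo_set_sge(struct demo_sge *sge, uint64_t addr, uint32_t len)
{
	sge->hi_addr = upper_32(addr);
	sge->lo_addr = lower_32(addr);
	sge->len = len;
}

/* Recombine the two halves back into the original 64-bit address. */
static uint64_t demo_sge_to_addr(const struct demo_sge *sge)
{
	return ((uint64_t)sge->hi_addr << 32) | sge->lo_addr;
}

int main(void)
{
	struct demo_sge sge;
	uint64_t addr = 0x0000123456789abcULL;

	demo_set_sge(&sge, addr, 2048);
	assert(demo_sge_to_addr(&sge) == addr);
	printf("hi=0x%08x lo=0x%08x len=%u\n", sge.hi_addr, sge.lo_addr, sge.len);
	return 0;
}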
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/if_vlan.h> #include <linux/semaphore.h> #include <linux/workqueue.h> #include <net/ip.h> #include <net/devlink.h> #include <linux/bitops.h> #include <linux/bitmap.h> #include <linux/delay.h> #include <linux/err.h> #include "hinic_debugfs.h" #include "hinic_hw_qp.h" #include "hinic_hw_dev.h" #include "hinic_devlink.h" #include "hinic_port.h" #include "hinic_tx.h" #include "hinic_rx.h" #include "hinic_dev.h" #include "hinic_sriov.h" MODULE_AUTHOR("Huawei Technologies CO., Ltd"); MODULE_DESCRIPTION("Huawei Intelligent NIC driver"); MODULE_LICENSE("GPL"); static unsigned int tx_weight = 64; module_param(tx_weight, uint, 0644); MODULE_PARM_DESC(tx_weight, "Number Tx packets for NAPI budget (default=64)"); static unsigned int rx_weight = 64; module_param(rx_weight, uint, 0644); MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); #define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822 #define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200 #define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205 #define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210 #define HINIC_DEV_ID_VF 0x375e #define HINIC_WQ_NAME "hinic_dev" #define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ NETIF_MSG_IFUP | \ NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) #define HINIC_LRO_MAX_WQE_NUM_DEFAULT 8 #define HINIC_LRO_RX_TIMER_DEFAULT 16 #define work_to_rx_mode_work(work) \ container_of(work, struct hinic_rx_mode_work, work) #define rx_mode_work_to_nic_dev(rx_mode_work) \ container_of(rx_mode_work, struct hinic_dev, rx_mode_work) #define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000 #define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2 #define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 32 #define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 static int change_mac_addr(struct net_device *netdev, const u8 *addr); static int set_features(struct hinic_dev *nic_dev, netdev_features_t pre_features, netdev_features_t features, bool force_change); static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq) { struct hinic_rxq_stats rx_stats; hinic_rxq_get_stats(rxq, &rx_stats); nic_rx_stats->bytes += rx_stats.bytes; nic_rx_stats->pkts += rx_stats.pkts; nic_rx_stats->errors += rx_stats.errors; nic_rx_stats->csum_errors += rx_stats.csum_errors; nic_rx_stats->other_errors += rx_stats.other_errors; } static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq) { struct hinic_txq_stats tx_stats; hinic_txq_get_stats(txq, &tx_stats); nic_tx_stats->bytes += tx_stats.bytes; nic_tx_stats->pkts += tx_stats.pkts; nic_tx_stats->tx_busy += tx_stats.tx_busy; nic_tx_stats->tx_wake += tx_stats.tx_wake; nic_tx_stats->tx_dropped += tx_stats.tx_dropped; nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts; } static void gather_nic_stats(struct hinic_dev *nic_dev, struct hinic_rxq_stats *nic_rx_stats, struct hinic_txq_stats *nic_tx_stats) { int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); for (i = 0; i < num_qps; i++) gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]); for (i = 0; i < num_qps; i++) gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]); } /** * create_txqs - Create the Logical Tx Queues of specific NIC device 
* @nic_dev: the specific NIC device * * Return 0 - Success, negative - Failure **/ static int create_txqs(struct hinic_dev *nic_dev) { int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; if (nic_dev->txqs) return -EINVAL; nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs, sizeof(*nic_dev->txqs), GFP_KERNEL); if (!nic_dev->txqs) return -ENOMEM; hinic_sq_dbgfs_init(nic_dev); for (i = 0; i < num_txqs; i++) { struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i); err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev); if (err) { netif_err(nic_dev, drv, netdev, "Failed to init Txq\n"); goto err_init_txq; } err = hinic_sq_debug_add(nic_dev, i); if (err) { netif_err(nic_dev, drv, netdev, "Failed to add SQ%d debug\n", i); goto err_add_sq_dbg; } } return 0; err_add_sq_dbg: hinic_clean_txq(&nic_dev->txqs[i]); err_init_txq: for (j = 0; j < i; j++) { hinic_sq_debug_rem(nic_dev->txqs[j].sq); hinic_clean_txq(&nic_dev->txqs[j]); } hinic_sq_dbgfs_uninit(nic_dev); devm_kfree(&netdev->dev, nic_dev->txqs); return err; } static void enable_txqs_napi(struct hinic_dev *nic_dev) { int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); int i; for (i = 0; i < num_txqs; i++) napi_enable(&nic_dev->txqs[i].napi); } static void disable_txqs_napi(struct hinic_dev *nic_dev) { int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); int i; for (i = 0; i < num_txqs; i++) napi_disable(&nic_dev->txqs[i].napi); } /** * free_txqs - Free the Logical Tx Queues of specific NIC device * @nic_dev: the specific NIC device **/ static void free_txqs(struct hinic_dev *nic_dev) { int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; if (!nic_dev->txqs) return; for (i = 0; i < num_txqs; i++) { hinic_sq_debug_rem(nic_dev->txqs[i].sq); hinic_clean_txq(&nic_dev->txqs[i]); } hinic_sq_dbgfs_uninit(nic_dev); devm_kfree(&netdev->dev, nic_dev->txqs); nic_dev->txqs = NULL; } /** * create_rxqs - Create the Logical Rx Queues of specific NIC device * @nic_dev: the specific NIC device * * Return 0 - Success, negative - Failure **/ static int create_rxqs(struct hinic_dev *nic_dev) { int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; if (nic_dev->rxqs) return -EINVAL; nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs, sizeof(*nic_dev->rxqs), GFP_KERNEL); if (!nic_dev->rxqs) return -ENOMEM; hinic_rq_dbgfs_init(nic_dev); for (i = 0; i < num_rxqs; i++) { struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i); err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev); if (err) { netif_err(nic_dev, drv, netdev, "Failed to init rxq\n"); goto err_init_rxq; } err = hinic_rq_debug_add(nic_dev, i); if (err) { netif_err(nic_dev, drv, netdev, "Failed to add RQ%d debug\n", i); goto err_add_rq_dbg; } } return 0; err_add_rq_dbg: hinic_clean_rxq(&nic_dev->rxqs[i]); err_init_rxq: for (j = 0; j < i; j++) { hinic_rq_debug_rem(nic_dev->rxqs[j].rq); hinic_clean_rxq(&nic_dev->rxqs[j]); } hinic_rq_dbgfs_uninit(nic_dev); devm_kfree(&netdev->dev, nic_dev->rxqs); return err; } /** * free_rxqs - Free the Logical Rx Queues of specific NIC device * @nic_dev: the specific NIC device **/ static void free_rxqs(struct hinic_dev *nic_dev) { int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); struct net_device *netdev = nic_dev->netdev; if (!nic_dev->rxqs) return; for (i = 0; i < num_rxqs; i++) { hinic_rq_debug_rem(nic_dev->rxqs[i].rq); hinic_clean_rxq(&nic_dev->rxqs[i]); } hinic_rq_dbgfs_uninit(nic_dev); 
devm_kfree(&netdev->dev, nic_dev->rxqs); nic_dev->rxqs = NULL; } static int hinic_configure_max_qnum(struct hinic_dev *nic_dev) { return hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps); } static int hinic_rss_init(struct hinic_dev *nic_dev) { u8 default_rss_key[HINIC_RSS_KEY_SIZE]; u8 tmpl_idx = nic_dev->rss_tmpl_idx; u32 *indir_tbl; int err, i; indir_tbl = kcalloc(HINIC_RSS_INDIR_SIZE, sizeof(u32), GFP_KERNEL); if (!indir_tbl) return -ENOMEM; netdev_rss_key_fill(default_rss_key, sizeof(default_rss_key)); for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) indir_tbl[i] = ethtool_rxfh_indir_default(i, nic_dev->num_rss); err = hinic_rss_set_template_tbl(nic_dev, tmpl_idx, default_rss_key); if (err) goto out; err = hinic_rss_set_indir_tbl(nic_dev, tmpl_idx, indir_tbl); if (err) goto out; err = hinic_set_rss_type(nic_dev, tmpl_idx, nic_dev->rss_type); if (err) goto out; err = hinic_rss_set_hash_engine(nic_dev, tmpl_idx, nic_dev->rss_hash_engine); if (err) goto out; err = hinic_rss_cfg(nic_dev, 1, tmpl_idx); if (err) goto out; out: kfree(indir_tbl); return err; } static void hinic_rss_deinit(struct hinic_dev *nic_dev) { hinic_rss_cfg(nic_dev, 0, nic_dev->rss_tmpl_idx); } static void hinic_init_rss_parameters(struct hinic_dev *nic_dev) { nic_dev->rss_hash_engine = HINIC_RSS_HASH_ENGINE_TYPE_XOR; nic_dev->rss_type.tcp_ipv6_ext = 1; nic_dev->rss_type.ipv6_ext = 1; nic_dev->rss_type.tcp_ipv6 = 1; nic_dev->rss_type.ipv6 = 1; nic_dev->rss_type.tcp_ipv4 = 1; nic_dev->rss_type.ipv4 = 1; nic_dev->rss_type.udp_ipv6 = 1; nic_dev->rss_type.udp_ipv4 = 1; } static void hinic_enable_rss(struct hinic_dev *nic_dev) { struct net_device *netdev = nic_dev->netdev; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; int i, node, err = 0; u16 num_cpus = 0; if (nic_dev->max_qps <= 1) { nic_dev->flags &= ~HINIC_RSS_ENABLE; nic_dev->rss_limit = nic_dev->max_qps; nic_dev->num_qps = nic_dev->max_qps; nic_dev->num_rss = nic_dev->max_qps; return; } err = hinic_rss_template_alloc(nic_dev, &nic_dev->rss_tmpl_idx); if (err) { netif_err(nic_dev, drv, netdev, "Failed to alloc tmpl_idx for rss, can't enable rss for this function\n"); nic_dev->flags &= ~HINIC_RSS_ENABLE; nic_dev->max_qps = 1; nic_dev->rss_limit = nic_dev->max_qps; nic_dev->num_qps = nic_dev->max_qps; nic_dev->num_rss = nic_dev->max_qps; return; } nic_dev->flags |= HINIC_RSS_ENABLE; for (i = 0; i < num_online_cpus(); i++) { node = cpu_to_node(i); if (node == dev_to_node(&pdev->dev)) num_cpus++; } if (!num_cpus) num_cpus = num_online_cpus(); nic_dev->num_qps = hinic_hwdev_num_qps(hwdev); nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus); nic_dev->rss_limit = nic_dev->num_qps; nic_dev->num_rss = nic_dev->num_qps; hinic_init_rss_parameters(nic_dev); err = hinic_rss_init(nic_dev); if (err) netif_err(nic_dev, drv, netdev, "Failed to init rss\n"); } int hinic_open(struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); enum hinic_port_link_state link_state; int err, ret; if (!(nic_dev->flags & HINIC_INTF_UP)) { err = hinic_hwdev_ifup(nic_dev->hwdev, nic_dev->sq_depth, nic_dev->rq_depth); if (err) { netif_err(nic_dev, drv, netdev, "Failed - HW interface up\n"); return err; } } err = create_txqs(nic_dev); if (err) { netif_err(nic_dev, drv, netdev, "Failed to create Tx queues\n"); goto err_create_txqs; } enable_txqs_napi(nic_dev); err = create_rxqs(nic_dev); if (err) { netif_err(nic_dev, drv, netdev, "Failed to create Rx queues\n"); goto err_create_rxqs; } 
hinic_enable_rss(nic_dev); err = hinic_configure_max_qnum(nic_dev); if (err) { netif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure the maximum number of queues\n"); goto err_port_state; } netif_set_real_num_tx_queues(netdev, nic_dev->num_qps); netif_set_real_num_rx_queues(netdev, nic_dev->num_qps); err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE); if (err) { netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); goto err_port_state; } err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE); if (err) { netif_err(nic_dev, drv, netdev, "Failed to set func port state\n"); goto err_func_port_state; } down(&nic_dev->mgmt_lock); err = hinic_port_link_state(nic_dev, &link_state); if (err) { netif_err(nic_dev, drv, netdev, "Failed to get link state\n"); goto err_port_link; } if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state); if (link_state == HINIC_LINK_STATE_UP) { nic_dev->flags |= HINIC_LINK_UP; nic_dev->cable_unplugged = false; nic_dev->module_unrecognized = false; } nic_dev->flags |= HINIC_INTF_UP; if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == (HINIC_LINK_UP | HINIC_INTF_UP)) { netif_info(nic_dev, drv, netdev, "link + intf UP\n"); netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); } up(&nic_dev->mgmt_lock); netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n"); return 0; err_port_link: up(&nic_dev->mgmt_lock); ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); if (ret) netif_warn(nic_dev, drv, netdev, "Failed to revert func port state\n"); err_func_port_state: ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); if (ret) netif_warn(nic_dev, drv, netdev, "Failed to revert port state\n"); err_port_state: free_rxqs(nic_dev); if (nic_dev->flags & HINIC_RSS_ENABLE) { hinic_rss_deinit(nic_dev); hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx); } err_create_rxqs: disable_txqs_napi(nic_dev); free_txqs(nic_dev); err_create_txqs: if (!(nic_dev->flags & HINIC_INTF_UP)) hinic_hwdev_ifdown(nic_dev->hwdev); return err; } int hinic_close(struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); unsigned int flags; /* Disable txq napi firstly to aviod rewaking txq in free_tx_poll */ disable_txqs_napi(nic_dev); down(&nic_dev->mgmt_lock); flags = nic_dev->flags; nic_dev->flags &= ~HINIC_INTF_UP; netif_carrier_off(netdev); netif_tx_disable(netdev); up(&nic_dev->mgmt_lock); if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) hinic_notify_all_vfs_link_changed(nic_dev->hwdev, 0); hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); if (nic_dev->flags & HINIC_RSS_ENABLE) { hinic_rss_deinit(nic_dev); hinic_rss_template_free(nic_dev, nic_dev->rss_tmpl_idx); } free_rxqs(nic_dev); free_txqs(nic_dev); if (flags & HINIC_INTF_UP) hinic_hwdev_ifdown(nic_dev->hwdev); netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n"); return 0; } static int hinic_change_mtu(struct net_device *netdev, int new_mtu) { struct hinic_dev *nic_dev = netdev_priv(netdev); int err; netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu); err = hinic_port_set_mtu(nic_dev, new_mtu); if (err) netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n"); else netdev->mtu = new_mtu; return err; } /** * change_mac_addr - change the main mac address of network device * @netdev: network device * @addr: mac address to set * * Return 0 - Success, negative - Failure **/ static int change_mac_addr(struct net_device *netdev, const u8 *addr) { struct hinic_dev 
*nic_dev = netdev_priv(netdev); u16 vid = 0; int err; if (!is_valid_ether_addr(addr)) return -EADDRNOTAVAIL; netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); down(&nic_dev->mgmt_lock); do { err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid); if (err) { netif_err(nic_dev, drv, netdev, "Failed to delete mac\n"); break; } err = hinic_port_add_mac(nic_dev, addr, vid); if (err) { netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); break; } vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); } while (vid != VLAN_N_VID); up(&nic_dev->mgmt_lock); return err; } static int hinic_set_mac_addr(struct net_device *netdev, void *addr) { unsigned char new_mac[ETH_ALEN]; struct sockaddr *saddr = addr; int err; memcpy(new_mac, saddr->sa_data, ETH_ALEN); err = change_mac_addr(netdev, new_mac); if (!err) eth_hw_addr_set(netdev, new_mac); return err; } /** * add_mac_addr - add mac address to network device * @netdev: network device * @addr: mac address to add * * Return 0 - Success, negative - Failure **/ static int add_mac_addr(struct net_device *netdev, const u8 *addr) { struct hinic_dev *nic_dev = netdev_priv(netdev); u16 vid = 0; int err; netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); down(&nic_dev->mgmt_lock); do { err = hinic_port_add_mac(nic_dev, addr, vid); if (err) { netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); break; } vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); } while (vid != VLAN_N_VID); up(&nic_dev->mgmt_lock); return err; } /** * remove_mac_addr - remove mac address from network device * @netdev: network device * @addr: mac address to remove * * Return 0 - Success, negative - Failure **/ static int remove_mac_addr(struct net_device *netdev, const u8 *addr) { struct hinic_dev *nic_dev = netdev_priv(netdev); u16 vid = 0; int err; if (!is_valid_ether_addr(addr)) return -EADDRNOTAVAIL; netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); down(&nic_dev->mgmt_lock); do { err = hinic_port_del_mac(nic_dev, addr, vid); if (err) { netif_err(nic_dev, drv, netdev, "Failed to delete mac\n"); break; } vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); } while (vid != VLAN_N_VID); up(&nic_dev->mgmt_lock); return err; } static int hinic_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct hinic_dev *nic_dev = netdev_priv(netdev); int ret, err; netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid); down(&nic_dev->mgmt_lock); err = hinic_port_add_vlan(nic_dev, vid); if (err) { netif_err(nic_dev, drv, netdev, "Failed to add vlan\n"); goto err_vlan_add; } err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid); if (err && err != HINIC_PF_SET_VF_ALREADY) { netif_err(nic_dev, drv, netdev, "Failed to set mac\n"); goto err_add_mac; } bitmap_set(nic_dev->vlan_bitmap, vid, 1); up(&nic_dev->mgmt_lock); return 0; err_add_mac: ret = hinic_port_del_vlan(nic_dev, vid); if (ret) netif_err(nic_dev, drv, netdev, "Failed to revert by removing vlan\n"); err_vlan_add: up(&nic_dev->mgmt_lock); return err; } static int hinic_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { struct hinic_dev *nic_dev = netdev_priv(netdev); int err; netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid); down(&nic_dev->mgmt_lock); err = 
hinic_port_del_vlan(nic_dev, vid); if (err) { netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); goto err_del_vlan; } bitmap_clear(nic_dev->vlan_bitmap, vid, 1); up(&nic_dev->mgmt_lock); return 0; err_del_vlan: up(&nic_dev->mgmt_lock); return err; } static void set_rx_mode(struct work_struct *work) { struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work); struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work); hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode); __dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); __dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); } static void hinic_set_rx_mode(struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_rx_mode_work *rx_mode_work; u32 rx_mode; rx_mode_work = &nic_dev->rx_mode_work; rx_mode = HINIC_RX_MODE_UC | HINIC_RX_MODE_MC | HINIC_RX_MODE_BC; if (netdev->flags & IFF_PROMISC) { if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) rx_mode |= HINIC_RX_MODE_PROMISC; } else if (netdev->flags & IFF_ALLMULTI) { rx_mode |= HINIC_RX_MODE_MC_ALL; } rx_mode_work->rx_mode = rx_mode; queue_work(nic_dev->workq, &rx_mode_work->work); } static void hinic_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct hinic_dev *nic_dev = netdev_priv(netdev); u16 sw_pi, hw_ci, sw_ci; struct hinic_sq *sq; u16 num_sqs, q_id; num_sqs = hinic_hwdev_num_qps(nic_dev->hwdev); netif_err(nic_dev, drv, netdev, "Tx timeout\n"); for (q_id = 0; q_id < num_sqs; q_id++) { if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id))) continue; sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id); sw_pi = atomic_read(&sq->wq->prod_idx) & sq->wq->mask; hw_ci = be16_to_cpu(*(u16 *)(sq->hw_ci_addr)) & sq->wq->mask; sw_ci = atomic_read(&sq->wq->cons_idx) & sq->wq->mask; netif_err(nic_dev, drv, netdev, "Txq%d: sw_pi: %d, hw_ci: %d, sw_ci: %d, napi->state: 0x%lx\n", q_id, sw_pi, hw_ci, sw_ci, nic_dev->txqs[q_id].napi.state); } } static void hinic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_rxq_stats nic_rx_stats = {}; struct hinic_txq_stats nic_tx_stats = {}; if (nic_dev->flags & HINIC_INTF_UP) gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats); stats->rx_bytes = nic_rx_stats.bytes; stats->rx_packets = nic_rx_stats.pkts; stats->rx_errors = nic_rx_stats.errors; stats->tx_bytes = nic_tx_stats.bytes; stats->tx_packets = nic_tx_stats.pkts; stats->tx_errors = nic_tx_stats.tx_dropped; } static int hinic_set_features(struct net_device *netdev, netdev_features_t features) { struct hinic_dev *nic_dev = netdev_priv(netdev); return set_features(nic_dev, nic_dev->netdev->features, features, false); } static netdev_features_t hinic_fix_features(struct net_device *netdev, netdev_features_t features) { struct hinic_dev *nic_dev = netdev_priv(netdev); /* If Rx checksum is disabled, then LRO should also be disabled */ if (!(features & NETIF_F_RXCSUM)) { netif_info(nic_dev, drv, netdev, "disabling LRO as RXCSUM is off\n"); features &= ~NETIF_F_LRO; } return features; } static const struct net_device_ops hinic_netdev_ops = { .ndo_open = hinic_open, .ndo_stop = hinic_close, .ndo_change_mtu = hinic_change_mtu, .ndo_set_mac_address = hinic_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, .ndo_set_rx_mode = hinic_set_rx_mode, .ndo_start_xmit = hinic_xmit_frame, .ndo_tx_timeout = hinic_tx_timeout, .ndo_get_stats64 = 
hinic_get_stats64, .ndo_fix_features = hinic_fix_features, .ndo_set_features = hinic_set_features, .ndo_set_vf_mac = hinic_ndo_set_vf_mac, .ndo_set_vf_vlan = hinic_ndo_set_vf_vlan, .ndo_get_vf_config = hinic_ndo_get_vf_config, .ndo_set_vf_trust = hinic_ndo_set_vf_trust, .ndo_set_vf_rate = hinic_ndo_set_vf_bw, .ndo_set_vf_spoofchk = hinic_ndo_set_vf_spoofchk, .ndo_set_vf_link_state = hinic_ndo_set_vf_link_state, }; static const struct net_device_ops hinicvf_netdev_ops = { .ndo_open = hinic_open, .ndo_stop = hinic_close, .ndo_change_mtu = hinic_change_mtu, .ndo_set_mac_address = hinic_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, .ndo_set_rx_mode = hinic_set_rx_mode, .ndo_start_xmit = hinic_xmit_frame, .ndo_tx_timeout = hinic_tx_timeout, .ndo_get_stats64 = hinic_get_stats64, .ndo_fix_features = hinic_fix_features, .ndo_set_features = hinic_set_features, }; static void netdev_features_init(struct net_device *netdev) { netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->vlan_features = netdev->hw_features; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SCTP_CRC | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_UDP_TUNNEL; } static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev) { struct hinic_nic_cfg *nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg; struct hinic_pause_config pause_info = {0}; struct hinic_port_cap port_cap = {0}; if (hinic_port_get_cap(nic_dev, &port_cap)) return; mutex_lock(&nic_cfg->cfg_mutex); if (nic_cfg->pause_set || !port_cap.autoneg_state) { nic_cfg->auto_neg = port_cap.autoneg_state; pause_info.auto_neg = nic_cfg->auto_neg; pause_info.rx_pause = nic_cfg->rx_pause; pause_info.tx_pause = nic_cfg->tx_pause; hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info); } mutex_unlock(&nic_cfg->cfg_mutex); } /** * link_status_event_handler - link event handler * @handle: nic device for the handler * @buf_in: input buffer * @in_size: input size * @buf_out: output buffer * @out_size: returned output size **/ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_port_link_status *link_status, *ret_link_status; struct hinic_dev *nic_dev = handle; link_status = buf_in; if (link_status->link == HINIC_LINK_STATE_UP) { down(&nic_dev->mgmt_lock); nic_dev->flags |= HINIC_LINK_UP; nic_dev->cable_unplugged = false; nic_dev->module_unrecognized = false; if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == (HINIC_LINK_UP | HINIC_INTF_UP)) { netif_carrier_on(nic_dev->netdev); netif_tx_wake_all_queues(nic_dev->netdev); } up(&nic_dev->mgmt_lock); if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) hinic_refresh_nic_cfg(nic_dev); netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n"); } else { down(&nic_dev->mgmt_lock); nic_dev->flags &= ~HINIC_LINK_UP; netif_carrier_off(nic_dev->netdev); netif_tx_disable(nic_dev->netdev); up(&nic_dev->mgmt_lock); netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n"); } if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_status->link); ret_link_status = buf_out; 
ret_link_status->status = 0; *out_size = sizeof(*ret_link_status); } static void cable_plug_event(void *handle, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_cable_plug_event *plug_event = buf_in; struct hinic_dev *nic_dev = handle; nic_dev->cable_unplugged = plug_event->plugged ? false : true; *out_size = sizeof(*plug_event); plug_event = buf_out; plug_event->status = 0; } static void link_err_event(void *handle, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_link_err_event *link_err = buf_in; struct hinic_dev *nic_dev = handle; if (link_err->err_type >= LINK_ERR_NUM) netif_info(nic_dev, link, nic_dev->netdev, "Link failed, Unknown error type: 0x%x\n", link_err->err_type); else nic_dev->module_unrecognized = true; *out_size = sizeof(*link_err); link_err = buf_out; link_err->status = 0; } static int set_features(struct hinic_dev *nic_dev, netdev_features_t pre_features, netdev_features_t features, bool force_change) { netdev_features_t changed = force_change ? ~0 : pre_features ^ features; u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN; netdev_features_t failed_features = 0; int ret = 0; int err = 0; if (changed & NETIF_F_TSO) { ret = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ? HINIC_TSO_ENABLE : HINIC_TSO_DISABLE); if (ret) { err = ret; failed_features |= NETIF_F_TSO; } } if (changed & NETIF_F_RXCSUM) { ret = hinic_set_rx_csum_offload(nic_dev, csum_en); if (ret) { err = ret; failed_features |= NETIF_F_RXCSUM; } } if (changed & NETIF_F_LRO) { ret = hinic_set_rx_lro_state(nic_dev, !!(features & NETIF_F_LRO), HINIC_LRO_RX_TIMER_DEFAULT, HINIC_LRO_MAX_WQE_NUM_DEFAULT); if (ret) { err = ret; failed_features |= NETIF_F_LRO; } } if (changed & NETIF_F_HW_VLAN_CTAG_RX) { ret = hinic_set_rx_vlan_offload(nic_dev, !!(features & NETIF_F_HW_VLAN_CTAG_RX)); if (ret) { err = ret; failed_features |= NETIF_F_HW_VLAN_CTAG_RX; } } if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { ret = hinic_set_vlan_fliter(nic_dev, !!(features & NETIF_F_HW_VLAN_CTAG_FILTER)); if (ret) { err = ret; failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER; } } if (err) { nic_dev->netdev->features = features ^ failed_features; return -EIO; } return 0; } static int hinic_init_intr_coalesce(struct hinic_dev *nic_dev) { u64 size; u16 i; size = sizeof(struct hinic_intr_coal_info) * nic_dev->max_qps; nic_dev->rx_intr_coalesce = kzalloc(size, GFP_KERNEL); if (!nic_dev->rx_intr_coalesce) return -ENOMEM; nic_dev->tx_intr_coalesce = kzalloc(size, GFP_KERNEL); if (!nic_dev->tx_intr_coalesce) { kfree(nic_dev->rx_intr_coalesce); return -ENOMEM; } for (i = 0; i < nic_dev->max_qps; i++) { nic_dev->rx_intr_coalesce[i].pending_limt = HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; nic_dev->rx_intr_coalesce[i].coalesce_timer_cfg = HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; nic_dev->rx_intr_coalesce[i].resend_timer_cfg = HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; nic_dev->tx_intr_coalesce[i].pending_limt = HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; nic_dev->tx_intr_coalesce[i].coalesce_timer_cfg = HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; nic_dev->tx_intr_coalesce[i].resend_timer_cfg = HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; } return 0; } static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev) { kfree(nic_dev->tx_intr_coalesce); kfree(nic_dev->rx_intr_coalesce); } /** * nic_dev_init - Initialize the NIC device * @pdev: the NIC pci device * * Return 0 - Success, negative - Failure **/ static int nic_dev_init(struct pci_dev *pdev) { struct hinic_rx_mode_work *rx_mode_work; struct hinic_dev *nic_dev; struct net_device 
*netdev; struct hinic_hwdev *hwdev; struct devlink *devlink; u8 addr[ETH_ALEN]; int err, num_qps; devlink = hinic_devlink_alloc(&pdev->dev); if (!devlink) { dev_err(&pdev->dev, "Hinic devlink alloc failed\n"); return -ENOMEM; } hwdev = hinic_init_hwdev(pdev, devlink); if (IS_ERR(hwdev)) { dev_err(&pdev->dev, "Failed to initialize HW device\n"); hinic_devlink_free(devlink); return PTR_ERR(hwdev); } num_qps = hinic_hwdev_num_qps(hwdev); if (num_qps <= 0) { dev_err(&pdev->dev, "Invalid number of QPS\n"); err = -EINVAL; goto err_num_qps; } netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps); if (!netdev) { dev_err(&pdev->dev, "Failed to allocate Ethernet device\n"); err = -ENOMEM; goto err_alloc_etherdev; } if (!HINIC_IS_VF(hwdev->hwif)) netdev->netdev_ops = &hinic_netdev_ops; else netdev->netdev_ops = &hinicvf_netdev_ops; netdev->max_mtu = HINIC_MAX_MTU_SIZE; netdev->min_mtu = HINIC_MIN_MTU_SIZE; nic_dev = netdev_priv(netdev); nic_dev->netdev = netdev; nic_dev->hwdev = hwdev; nic_dev->msg_enable = MSG_ENABLE_DEFAULT; nic_dev->flags = 0; nic_dev->txqs = NULL; nic_dev->rxqs = NULL; nic_dev->tx_weight = tx_weight; nic_dev->rx_weight = rx_weight; nic_dev->sq_depth = HINIC_SQ_DEPTH; nic_dev->rq_depth = HINIC_RQ_DEPTH; nic_dev->sriov_info.hwdev = hwdev; nic_dev->sriov_info.pdev = pdev; nic_dev->max_qps = num_qps; nic_dev->devlink = devlink; hinic_set_ethtool_ops(netdev); sema_init(&nic_dev->mgmt_lock, 1); nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID, GFP_KERNEL); if (!nic_dev->vlan_bitmap) { err = -ENOMEM; goto err_vlan_bitmap; } nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME); if (!nic_dev->workq) { err = -ENOMEM; goto err_workq; } pci_set_drvdata(pdev, netdev); err = hinic_port_get_mac(nic_dev, addr); if (err) { dev_err(&pdev->dev, "Failed to get mac address\n"); goto err_get_mac; } eth_hw_addr_set(netdev, addr); if (!is_valid_ether_addr(netdev->dev_addr)) { if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) { dev_err(&pdev->dev, "Invalid MAC address\n"); err = -EIO; goto err_add_mac; } dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", netdev->dev_addr); eth_hw_addr_random(netdev); } err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0); if (err && err != HINIC_PF_SET_VF_ALREADY) { dev_err(&pdev->dev, "Failed to add mac\n"); goto err_add_mac; } err = hinic_port_set_mtu(nic_dev, netdev->mtu); if (err) { dev_err(&pdev->dev, "Failed to set mtu\n"); goto err_set_mtu; } rx_mode_work = &nic_dev->rx_mode_work; INIT_WORK(&rx_mode_work->work, set_rx_mode); netdev_features_init(netdev); netif_carrier_off(netdev); hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, nic_dev, link_status_event_handler); hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT, nic_dev, cable_plug_event); hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT, nic_dev, link_err_event); err = set_features(nic_dev, 0, nic_dev->netdev->features, true); if (err) goto err_set_features; /* enable pause and disable pfc by default */ err = hinic_dcb_set_pfc(nic_dev->hwdev, 0, 0); if (err) goto err_set_pfc; SET_NETDEV_DEV(netdev, &pdev->dev); err = hinic_init_intr_coalesce(nic_dev); if (err) { dev_err(&pdev->dev, "Failed to init_intr_coalesce\n"); goto err_init_intr; } hinic_dbg_init(nic_dev); hinic_func_tbl_dbgfs_init(nic_dev); err = hinic_func_table_debug_add(nic_dev); if (err) { dev_err(&pdev->dev, "Failed to add func_table debug\n"); goto err_add_func_table_dbg; } err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to 
register netdev\n"); goto err_reg_netdev; } return 0; err_reg_netdev: hinic_func_table_debug_rem(nic_dev); err_add_func_table_dbg: hinic_func_tbl_dbgfs_uninit(nic_dev); hinic_dbg_uninit(nic_dev); hinic_free_intr_coalesce(nic_dev); err_init_intr: err_set_pfc: err_set_features: hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT); hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT); hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS); cancel_work_sync(&rx_mode_work->work); err_set_mtu: hinic_port_del_mac(nic_dev, netdev->dev_addr, 0); err_add_mac: err_get_mac: pci_set_drvdata(pdev, NULL); destroy_workqueue(nic_dev->workq); err_workq: err_vlan_bitmap: free_netdev(netdev); err_alloc_etherdev: err_num_qps: hinic_free_hwdev(hwdev); hinic_devlink_free(devlink); return err; } static int hinic_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err = pci_enable_device(pdev); if (err) return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n"); err = pci_request_regions(pdev, HINIC_DRV_NAME); if (err) { dev_err(&pdev->dev, "Failed to request PCI regions\n"); goto err_pci_regions; } pci_set_master(pdev); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "Failed to set DMA mask\n"); goto err_dma_mask; } err = nic_dev_init(pdev); if (err) { dev_err(&pdev->dev, "Failed to initialize NIC device\n"); goto err_nic_dev_init; } dev_info(&pdev->dev, "HiNIC driver - probed\n"); return 0; err_nic_dev_init: err_dma_mask: pci_release_regions(pdev); err_pci_regions: pci_disable_device(pdev); return err; } static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev) { struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info; u32 loop_cnt = 0; set_bit(HINIC_FUNC_REMOVE, &sriov_info->state); usleep_range(9900, 10000); while (loop_cnt < HINIC_WAIT_SRIOV_CFG_TIMEOUT) { if (!test_bit(HINIC_SRIOV_ENABLE, &sriov_info->state) && !test_bit(HINIC_SRIOV_DISABLE, &sriov_info->state)) return; usleep_range(9900, 10000); loop_cnt++; } } static void hinic_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct hinic_dev *nic_dev = netdev_priv(netdev); struct devlink *devlink = nic_dev->devlink; struct hinic_rx_mode_work *rx_mode_work; if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) { wait_sriov_cfg_complete(nic_dev); hinic_pci_sriov_disable(pdev); } unregister_netdev(netdev); hinic_func_table_debug_rem(nic_dev); hinic_func_tbl_dbgfs_uninit(nic_dev); hinic_dbg_uninit(nic_dev); hinic_free_intr_coalesce(nic_dev); hinic_port_del_mac(nic_dev, netdev->dev_addr, 0); hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT); hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT); hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS); rx_mode_work = &nic_dev->rx_mode_work; cancel_work_sync(&rx_mode_work->work); pci_set_drvdata(pdev, NULL); destroy_workqueue(nic_dev->workq); hinic_free_hwdev(nic_dev->hwdev); free_netdev(netdev); hinic_devlink_free(devlink); pci_release_regions(pdev); pci_disable_device(pdev); dev_info(&pdev->dev, "HiNIC driver - removed\n"); } static void hinic_shutdown(struct pci_dev *pdev) { pci_disable_device(pdev); } static const struct pci_device_id hinic_pci_table[] = { { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0}, { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0}, { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0}, { PCI_VDEVICE(HUAWEI, 
HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0}, { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_VF), 0}, { 0, 0} }; MODULE_DEVICE_TABLE(pci, hinic_pci_table); static struct pci_driver hinic_driver = { .name = HINIC_DRV_NAME, .id_table = hinic_pci_table, .probe = hinic_probe, .remove = hinic_remove, .shutdown = hinic_shutdown, .sriov_configure = hinic_pci_sriov_configure, }; static int __init hinic_module_init(void) { int ret; hinic_dbg_register_debugfs(HINIC_DRV_NAME); ret = pci_register_driver(&hinic_driver); if (ret) hinic_dbg_unregister_debugfs(); return ret; } static void __exit hinic_module_exit(void) { pci_unregister_driver(&hinic_driver); hinic_dbg_unregister_debugfs(); } module_init(hinic_module_init); module_exit(hinic_module_exit);
linux-master
drivers/net/ethernet/huawei/hinic/hinic_main.c
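The probe path in hinic_main.c above (nic_dev_init() and hinic_probe()) acquires resources in a fixed order and, on any failure, jumps to a goto label that releases only what was already set up, in reverse order. The following stand-alone sketch shows that unwind pattern in isolation; every name in it (fake_enable_device, fake_alloc_netdev, and so on) is invented for illustration and is not a HiNIC or kernel API.

/*
 * Minimal sketch of the goto-based error unwinding used in hinic_probe()
 * and nic_dev_init().  All functions below are stand-ins, not real APIs.
 */
#include <stdio.h>
#include <stdlib.h>

static int fake_enable_device(void)   { puts("enable device");   return 0; }
static void fake_disable_device(void) { puts("disable device"); }

static int fake_request_regions(void)  { puts("request regions"); return 0; }
static void fake_release_regions(void) { puts("release regions"); }

/* Fails on purpose so the unwind path below is exercised. */
static void *fake_alloc_netdev(void)  { puts("alloc netdev (fails)"); return NULL; }

static int fake_probe(void)
{
	void *netdev;
	int err;

	err = fake_enable_device();
	if (err)
		return err;

	err = fake_request_regions();
	if (err)
		goto err_regions;

	netdev = fake_alloc_netdev();
	if (!netdev) {
		err = -1;		/* stands in for -ENOMEM */
		goto err_alloc;
	}

	/* Further init steps would follow, each adding its own label.
	 * On success the resources stay held; a matching remove() would
	 * release them in reverse order, as hinic_remove() does. */
	return 0;

	/* Labels run in reverse order of acquisition, releasing only what
	 * was successfully set up before the failure. */
err_alloc:
	fake_release_regions();
err_regions:
	fake_disable_device();
	return err;
}

int main(void)
{
	return fake_probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}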
// SPDX-License-Identifier: GPL-2.0-only /* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/debugfs.h> #include <linux/device.h> #include "hinic_debugfs.h" static struct dentry *hinic_dbgfs_root; enum sq_dbg_info { GLB_SQ_ID, SQ_PI, SQ_CI, SQ_FI, SQ_MSIX_ENTRY, }; static char *sq_fields[] = {"glb_sq_id", "sq_pi", "sq_ci", "sq_fi", "sq_msix_entry"}; static u64 hinic_dbg_get_sq_info(struct hinic_dev *nic_dev, struct hinic_sq *sq, int idx) { struct hinic_wq *wq = sq->wq; switch (idx) { case GLB_SQ_ID: return nic_dev->hwdev->func_to_io.global_qpn + sq->qid; case SQ_PI: return atomic_read(&wq->prod_idx) & wq->mask; case SQ_CI: return atomic_read(&wq->cons_idx) & wq->mask; case SQ_FI: return be16_to_cpu(*(__be16 *)(sq->hw_ci_addr)) & wq->mask; case SQ_MSIX_ENTRY: return sq->msix_entry; } return 0; } enum rq_dbg_info { GLB_RQ_ID, RQ_HW_PI, RQ_SW_CI, RQ_SW_PI, RQ_MSIX_ENTRY, }; static char *rq_fields[] = {"glb_rq_id", "rq_hw_pi", "rq_sw_ci", "rq_sw_pi", "rq_msix_entry"}; static u64 hinic_dbg_get_rq_info(struct hinic_dev *nic_dev, struct hinic_rq *rq, int idx) { struct hinic_wq *wq = rq->wq; switch (idx) { case GLB_RQ_ID: return nic_dev->hwdev->func_to_io.global_qpn + rq->qid; case RQ_HW_PI: return be16_to_cpu(*(__be16 *)(rq->pi_virt_addr)) & wq->mask; case RQ_SW_CI: return atomic_read(&wq->cons_idx) & wq->mask; case RQ_SW_PI: return atomic_read(&wq->prod_idx) & wq->mask; case RQ_MSIX_ENTRY: return rq->msix_entry; } return 0; } enum func_tbl_info { VALID, RX_MODE, MTU, RQ_DEPTH, QUEUE_NUM, }; static char *func_table_fields[] = {"valid", "rx_mode", "mtu", "rq_depth", "cfg_q_num"}; static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx) { struct tag_sml_funcfg_tbl *funcfg_table_elem; struct hinic_cmd_lt_rd *read_data; u16 out_size = sizeof(*read_data); int ret = ~0; int err; read_data = kzalloc(sizeof(*read_data), GFP_KERNEL); if (!read_data) return ~0; read_data->node = TBL_ID_FUNC_CFG_SM_NODE; read_data->inst = TBL_ID_FUNC_CFG_SM_INST; read_data->entry_size = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE; read_data->lt_index = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif); read_data->len = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE; err = hinic_port_msg_cmd(nic_dev->hwdev, HINIC_PORT_CMD_RD_LINE_TBL, read_data, sizeof(*read_data), read_data, &out_size); if (err || out_size != sizeof(*read_data) || read_data->status) { netif_err(nic_dev, drv, nic_dev->netdev, "Failed to get func table, err: %d, status: 0x%x, out size: 0x%x\n", err, read_data->status, out_size); kfree(read_data); return ~0; } funcfg_table_elem = (struct tag_sml_funcfg_tbl *)read_data->data; switch (idx) { case VALID: ret = funcfg_table_elem->dw0.bs.valid; break; case RX_MODE: ret = funcfg_table_elem->dw0.bs.nic_rx_mode; break; case MTU: ret = funcfg_table_elem->dw1.bs.mtu; break; case RQ_DEPTH: ret = funcfg_table_elem->dw13.bs.cfg_rq_depth; break; case QUEUE_NUM: ret = funcfg_table_elem->dw13.bs.cfg_q_num; break; } kfree(read_data); return ret; } static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct hinic_debug_priv *dbg; char ret_buf[20]; int *desc; u64 out; int ret; desc = filp->private_data; dbg = container_of(desc, struct hinic_debug_priv, field_id[*desc]); switch (dbg->type) { case HINIC_DBG_SQ_INFO: out = hinic_dbg_get_sq_info(dbg->dev, dbg->object, *desc); break; case HINIC_DBG_RQ_INFO: out = hinic_dbg_get_rq_info(dbg->dev, dbg->object, *desc); break; case HINIC_DBG_FUNC_TABLE: out = hinic_dbg_get_func_table(dbg->dev, 
*desc); break; default: netif_warn(dbg->dev, drv, dbg->dev->netdev, "Invalid hinic debug cmd: %d\n", dbg->type); return -EINVAL; } ret = snprintf(ret_buf, sizeof(ret_buf), "0x%llx\n", out); return simple_read_from_buffer(buffer, count, ppos, ret_buf, ret); } static const struct file_operations hinic_dbg_cmd_fops = { .owner = THIS_MODULE, .open = simple_open, .read = hinic_dbg_cmd_read, }; static int create_dbg_files(struct hinic_dev *dev, enum hinic_dbg_type type, void *data, struct dentry *root, struct hinic_debug_priv **dbg, char **field, int nfile) { struct hinic_debug_priv *tmp; int i; tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; tmp->dev = dev; tmp->object = data; tmp->type = type; tmp->root = root; for (i = 0; i < nfile; i++) { tmp->field_id[i] = i; debugfs_create_file(field[i], 0400, root, &tmp->field_id[i], &hinic_dbg_cmd_fops); } *dbg = tmp; return 0; } static void rem_dbg_files(struct hinic_debug_priv *dbg) { if (dbg->type != HINIC_DBG_FUNC_TABLE) debugfs_remove_recursive(dbg->root); kfree(dbg); } int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id) { struct hinic_sq *sq; struct dentry *root; char sub_dir[16]; sq = dev->txqs[sq_id].sq; sprintf(sub_dir, "0x%x", sq_id); root = debugfs_create_dir(sub_dir, dev->sq_dbgfs); return create_dbg_files(dev, HINIC_DBG_SQ_INFO, sq, root, &sq->dbg, sq_fields, ARRAY_SIZE(sq_fields)); } void hinic_sq_debug_rem(struct hinic_sq *sq) { if (sq->dbg) rem_dbg_files(sq->dbg); } int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id) { struct hinic_rq *rq; struct dentry *root; char sub_dir[16]; rq = dev->rxqs[rq_id].rq; sprintf(sub_dir, "0x%x", rq_id); root = debugfs_create_dir(sub_dir, dev->rq_dbgfs); return create_dbg_files(dev, HINIC_DBG_RQ_INFO, rq, root, &rq->dbg, rq_fields, ARRAY_SIZE(rq_fields)); } void hinic_rq_debug_rem(struct hinic_rq *rq) { if (rq->dbg) rem_dbg_files(rq->dbg); } int hinic_func_table_debug_add(struct hinic_dev *dev) { if (HINIC_IS_VF(dev->hwdev->hwif)) return 0; return create_dbg_files(dev, HINIC_DBG_FUNC_TABLE, dev, dev->func_tbl_dbgfs, &dev->dbg, func_table_fields, ARRAY_SIZE(func_table_fields)); } void hinic_func_table_debug_rem(struct hinic_dev *dev) { if (!HINIC_IS_VF(dev->hwdev->hwif) && dev->dbg) rem_dbg_files(dev->dbg); } void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev) { nic_dev->sq_dbgfs = debugfs_create_dir("SQs", nic_dev->dbgfs_root); } void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev) { debugfs_remove_recursive(nic_dev->sq_dbgfs); } void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev) { nic_dev->rq_dbgfs = debugfs_create_dir("RQs", nic_dev->dbgfs_root); } void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev) { debugfs_remove_recursive(nic_dev->rq_dbgfs); } void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev) { if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) nic_dev->func_tbl_dbgfs = debugfs_create_dir("func_table", nic_dev->dbgfs_root); } void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev) { if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) debugfs_remove_recursive(nic_dev->func_tbl_dbgfs); } void hinic_dbg_init(struct hinic_dev *nic_dev) { nic_dev->dbgfs_root = debugfs_create_dir(pci_name(nic_dev->hwdev->hwif->pdev), hinic_dbgfs_root); } void hinic_dbg_uninit(struct hinic_dev *nic_dev) { debugfs_remove_recursive(nic_dev->dbgfs_root); nic_dev->dbgfs_root = NULL; } void hinic_dbg_register_debugfs(const char *debugfs_dir_name) { hinic_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL); } void hinic_dbg_unregister_debugfs(void) { debugfs_remove_recursive(hinic_dbgfs_root); 
hinic_dbgfs_root = NULL; }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
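Each debugfs attribute created in hinic_debugfs.c above returns a single value formatted as "0x%llx\n" via simple_read_from_buffer(). A small user-space reader for such a file is sketched below; the example path is an assumption pieced together from the directory and field names used in this file (driver directory, PCI address, "SQs", "0x<qid>", "sq_pi") and the usual /sys/kernel/debug mount point, so it must be adjusted to the actual system.

/*
 * User-space sketch: read one single-value debugfs attribute created by
 * hinic_debugfs.c.  The default path below is an assumption; pass the real
 * path as argv[1] on the target system.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] :
		"/sys/kernel/debug/hinic/0000:3b:00.0/SQs/0x0/sq_pi";
	char buf[32] = { 0 };
	unsigned long long val;
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return EXIT_FAILURE;
	}

	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0) {
		fprintf(stderr, "read %s failed\n", path);
		return EXIT_FAILURE;
	}

	/* The kernel side wrote "0x%llx\n", so strtoull with base 0 parses it. */
	val = strtoull(buf, NULL, 0);
	printf("%s = %llu (0x%llx)\n", path, val, val);
	return EXIT_SUCCESS;
}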
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/types.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/errno.h> #include "hinic_hw_if.h" #include "hinic_hw_dev.h" #include "hinic_port.h" #include "hinic_dev.h" enum mac_op { MAC_DEL, MAC_SET, }; /** * change_mac - change(add or delete) mac address * @nic_dev: nic device * @addr: mac address * @vlan_id: vlan number to set with the mac * @op: add or delete the mac * * Return 0 - Success, negative - Failure **/ static int change_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id, enum mac_op op) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_port_mac_cmd port_mac_cmd; struct hinic_hwif *hwif = hwdev->hwif; u16 out_size = sizeof(port_mac_cmd); struct pci_dev *pdev = hwif->pdev; enum hinic_port_cmd cmd; int err; if (op == MAC_SET) cmd = HINIC_PORT_CMD_SET_MAC; else cmd = HINIC_PORT_CMD_DEL_MAC; port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); port_mac_cmd.vlan_id = vlan_id; memcpy(port_mac_cmd.mac, addr, ETH_ALEN); err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd, sizeof(port_mac_cmd), &port_mac_cmd, &out_size); if (err || out_size != sizeof(port_mac_cmd) || (port_mac_cmd.status && (port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY || !HINIC_IS_VF(hwif)) && port_mac_cmd.status != HINIC_MGMT_STATUS_EXIST)) { dev_err(&pdev->dev, "Failed to change MAC, err: %d, status: 0x%x, out size: 0x%x\n", err, port_mac_cmd.status, out_size); return -EFAULT; } if (port_mac_cmd.status == HINIC_PF_SET_VF_ALREADY) { dev_warn(&pdev->dev, "PF has already set VF mac, ignore %s operation\n", (op == MAC_SET) ? 
"set" : "del"); return HINIC_PF_SET_VF_ALREADY; } if (cmd == HINIC_PORT_CMD_SET_MAC && port_mac_cmd.status == HINIC_MGMT_STATUS_EXIST) dev_warn(&pdev->dev, "MAC is repeated, ignore set operation\n"); return 0; } /** * hinic_port_add_mac - add mac address * @nic_dev: nic device * @addr: mac address * @vlan_id: vlan number to set with the mac * * Return 0 - Success, negative - Failure **/ int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id) { return change_mac(nic_dev, addr, vlan_id, MAC_SET); } /** * hinic_port_del_mac - remove mac address * @nic_dev: nic device * @addr: mac address * @vlan_id: vlan number that is connected to the mac * * Return 0 - Success, negative - Failure **/ int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id) { return change_mac(nic_dev, addr, vlan_id, MAC_DEL); } /** * hinic_port_get_mac - get the mac address of the nic device * @nic_dev: nic device * @addr: returned mac address * * Return 0 - Success, negative - Failure **/ int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_port_mac_cmd port_mac_cmd; struct hinic_hwif *hwif = hwdev->hwif; u16 out_size = sizeof(port_mac_cmd); struct pci_dev *pdev = hwif->pdev; int err; port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC, &port_mac_cmd, sizeof(port_mac_cmd), &port_mac_cmd, &out_size); if (err || out_size != sizeof(port_mac_cmd) || port_mac_cmd.status) { dev_err(&pdev->dev, "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n", err, port_mac_cmd.status, out_size); return -EFAULT; } memcpy(addr, port_mac_cmd.mac, ETH_ALEN); return 0; } /** * hinic_port_set_mtu - set mtu * @nic_dev: nic device * @new_mtu: new mtu * * Return 0 - Success, negative - Failure **/ int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_port_mtu_cmd port_mtu_cmd; struct hinic_hwif *hwif = hwdev->hwif; u16 out_size = sizeof(port_mtu_cmd); struct pci_dev *pdev = hwif->pdev; int err; port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); port_mtu_cmd.mtu = new_mtu; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, &port_mtu_cmd, sizeof(port_mtu_cmd), &port_mtu_cmd, &out_size); if (err || out_size != sizeof(port_mtu_cmd) || port_mtu_cmd.status) { dev_err(&pdev->dev, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\n", err, port_mtu_cmd.status, out_size); return -EFAULT; } return 0; } /** * hinic_port_add_vlan - add vlan to the nic device * @nic_dev: nic device * @vlan_id: the vlan number to add * * Return 0 - Success, negative - Failure **/ int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_port_vlan_cmd port_vlan_cmd; port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); port_vlan_cmd.vlan_id = vlan_id; return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN, &port_vlan_cmd, sizeof(port_vlan_cmd), NULL, NULL); } /** * hinic_port_del_vlan - delete vlan from the nic device * @nic_dev: nic device * @vlan_id: the vlan number to delete * * Return 0 - Success, negative - Failure **/ int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_port_vlan_cmd port_vlan_cmd; port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); port_vlan_cmd.vlan_id = vlan_id; return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN, &port_vlan_cmd, sizeof(port_vlan_cmd), 
NULL, NULL); } /** * hinic_port_set_rx_mode - set rx mode in the nic device * @nic_dev: nic device * @rx_mode: the rx mode to set * * Return 0 - Success, negative - Failure **/ int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_port_rx_mode_cmd rx_mode_cmd; rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); rx_mode_cmd.rx_mode = rx_mode; return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE, &rx_mode_cmd, sizeof(rx_mode_cmd), NULL, NULL); } /** * hinic_port_link_state - get the link state * @nic_dev: nic device * @link_state: the returned link state * * Return 0 - Success, negative - Failure **/ int hinic_port_link_state(struct hinic_dev *nic_dev, enum hinic_port_link_state *link_state) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct hinic_port_link_cmd link_cmd; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(link_cmd); int err; link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE, &link_cmd, sizeof(link_cmd), &link_cmd, &out_size); if (err || out_size != sizeof(link_cmd) || link_cmd.status) { dev_err(&pdev->dev, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n", err, link_cmd.status, out_size); return -EINVAL; } *link_state = link_cmd.state; return 0; } /** * hinic_port_set_state - set port state * @nic_dev: nic device * @state: the state to set * * Return 0 - Success, negative - Failure **/ int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_port_state_cmd port_state; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(port_state); int err; if (HINIC_IS_VF(hwdev->hwif)) return 0; port_state.state = state; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE, &port_state, sizeof(port_state), &port_state, &out_size); if (err || out_size != sizeof(port_state) || port_state.status) { dev_err(&pdev->dev, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n", err, port_state.status, out_size); return -EFAULT; } return 0; } /** * hinic_port_set_func_state- set func device state * @nic_dev: nic device * @state: the state to set * * Return 0 - Success, negative - Failure **/ int hinic_port_set_func_state(struct hinic_dev *nic_dev, enum hinic_func_port_state state) { struct hinic_port_func_state_cmd func_state; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(func_state); int err; func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); func_state.state = state; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE, &func_state, sizeof(func_state), &func_state, &out_size); if (err || out_size != sizeof(func_state) || func_state.status) { dev_err(&pdev->dev, "Failed to set port func state, err: %d, status: 0x%x, out size: 0x%x\n", err, func_state.status, out_size); return -EFAULT; } return 0; } /** * hinic_port_get_cap - get port capabilities * @nic_dev: nic device * @port_cap: returned port capabilities * * Return 0 - Success, negative - Failure **/ int hinic_port_get_cap(struct hinic_dev *nic_dev, struct hinic_port_cap *port_cap) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(*port_cap); int err; port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif); err = 
hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP, port_cap, sizeof(*port_cap), port_cap, &out_size); if (err || out_size != sizeof(*port_cap) || port_cap->status) { dev_err(&pdev->dev, "Failed to get port capabilities, err: %d, status: 0x%x, out size: 0x%x\n", err, port_cap->status, out_size); return -EIO; } return 0; } /** * hinic_port_set_tso - set port tso configuration * @nic_dev: nic device * @state: the tso state to set * * Return 0 - Success, negative - Failure **/ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct hinic_tso_config tso_cfg = {0}; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(tso_cfg); int err; tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); tso_cfg.tso_en = state; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_TSO, &tso_cfg, sizeof(tso_cfg), &tso_cfg, &out_size); if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) { dev_err(&pdev->dev, "Failed to set port tso, err: %d, status: 0x%x, out size: 0x%x\n", err, tso_cfg.status, out_size); return -EIO; } return 0; } int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en) { struct hinic_checksum_offload rx_csum_cfg = {0}; struct hinic_hwdev *hwdev = nic_dev->hwdev; u16 out_size = sizeof(rx_csum_cfg); struct hinic_hwif *hwif; struct pci_dev *pdev; int err; if (!hwdev) return -EINVAL; hwif = hwdev->hwif; pdev = hwif->pdev; rx_csum_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); rx_csum_cfg.rx_csum_offload = en; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_CSUM, &rx_csum_cfg, sizeof(rx_csum_cfg), &rx_csum_cfg, &out_size); if (err || !out_size || rx_csum_cfg.status) { dev_err(&pdev->dev, "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\n", err, rx_csum_cfg.status, out_size); return -EIO; } return 0; } int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_vlan_cfg vlan_cfg; struct hinic_hwif *hwif; struct pci_dev *pdev; u16 out_size; int err; if (!hwdev) return -EINVAL; out_size = sizeof(vlan_cfg); hwif = hwdev->hwif; pdev = hwif->pdev; vlan_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); vlan_cfg.vlan_rx_offload = en; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, &vlan_cfg, sizeof(vlan_cfg), &vlan_cfg, &out_size); if (err || !out_size || vlan_cfg.status) { dev_err(&pdev->dev, "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n", err, vlan_cfg.status, out_size); return -EINVAL; } return 0; } int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_vlan_filter vlan_filter; u16 out_size = sizeof(vlan_filter); int err; vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif); vlan_filter.enable = en; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER, &vlan_filter, sizeof(vlan_filter), &vlan_filter, &out_size); if (vlan_filter.status == HINIC_MGMT_CMD_UNSUPPORTED) { err = HINIC_MGMT_CMD_UNSUPPORTED; } else if ((err == HINIC_MBOX_VF_CMD_ERROR) && HINIC_IS_VF(hwif)) { err = HINIC_MGMT_CMD_UNSUPPORTED; } else if (err || !out_size || vlan_filter.status) { dev_err(&pdev->dev, "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n", err, vlan_filter.status, out_size); err = -EINVAL; } return err; } int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct 
hinic_hwif *hwif = hwdev->hwif; struct hinic_rq_num rq_num = { 0 }; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(rq_num); int err; rq_num.func_id = HINIC_HWIF_FUNC_IDX(hwif); rq_num.num_rqs = num_rqs; rq_num.rq_depth = ilog2(nic_dev->rq_depth); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RQ_IQ_MAP, &rq_num, sizeof(rq_num), &rq_num, &out_size); if (err || !out_size || rq_num.status) { dev_err(&pdev->dev, "Failed to set rxq number, err: %d, status: 0x%x, out size: 0x%x\n", err, rq_num.status, out_size); return -EIO; } return 0; } static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_lro_config lro_cfg = { 0 }; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(lro_cfg); int err; lro_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); lro_cfg.lro_ipv4_en = ipv4_en; lro_cfg.lro_ipv6_en = ipv6_en; lro_cfg.lro_max_wqe_num = max_wqe_num; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO, &lro_cfg, sizeof(lro_cfg), &lro_cfg, &out_size); if (err || !out_size || lro_cfg.status) { dev_err(&pdev->dev, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n", err, lro_cfg.status, out_size); return -EIO; } return 0; } static int hinic_set_rx_lro_timer(struct hinic_dev *nic_dev, u32 timer_value) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_lro_timer lro_timer = { 0 }; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(lro_timer); int err; lro_timer.status = 0; lro_timer.type = 0; lro_timer.enable = 1; lro_timer.timer = timer_value; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LRO_TIMER, &lro_timer, sizeof(lro_timer), &lro_timer, &out_size); if (lro_timer.status == 0xFF) { /* For this case, we think status (0xFF) is OK */ lro_timer.status = 0; dev_dbg(&pdev->dev, "Set lro timer not supported by the current FW version, it will be 1ms default\n"); } if (err || !out_size || lro_timer.status) { dev_err(&pdev->dev, "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n", err, lro_timer.status, out_size); return -EIO; } return 0; } int hinic_set_rx_lro_state(struct hinic_dev *nic_dev, u8 lro_en, u32 lro_timer, u32 wqe_num) { struct hinic_hwdev *hwdev = nic_dev->hwdev; u8 ipv4_en; u8 ipv6_en; int err; if (!hwdev) return -EINVAL; ipv4_en = lro_en ? 1 : 0; ipv6_en = lro_en ? 
1 : 0; err = hinic_set_rx_lro(nic_dev, ipv4_en, ipv6_en, (u8)wqe_num); if (err) return err; if (HINIC_IS_VF(nic_dev->hwdev->hwif)) return 0; err = hinic_set_rx_lro_timer(nic_dev, lro_timer); if (err) return err; return 0; } int hinic_rss_set_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx, const u32 *indir_table) { struct hinic_rss_indirect_tbl *indir_tbl; struct hinic_func_to_io *func_to_io; struct hinic_cmdq_buf cmd_buf; struct hinic_hwdev *hwdev; struct hinic_hwif *hwif; struct pci_dev *pdev; u32 indir_size; u64 out_param; int err, i; u32 *temp; hwdev = nic_dev->hwdev; func_to_io = &hwdev->func_to_io; hwif = hwdev->hwif; pdev = hwif->pdev; err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf); if (err) { dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); return err; } cmd_buf.size = sizeof(*indir_tbl); indir_tbl = cmd_buf.buf; indir_tbl->group_index = cpu_to_be32(tmpl_idx); for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) { indir_tbl->entry[i] = indir_table[i]; if (0x3 == (i & 0x3)) { temp = (u32 *)&indir_tbl->entry[i - 3]; *temp = cpu_to_be32(*temp); } } /* cfg the rss indirect table by command queue */ indir_size = HINIC_RSS_INDIR_SIZE / 2; indir_tbl->offset = 0; indir_tbl->size = cpu_to_be32(indir_size); err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, &cmd_buf, &out_param); if (err || out_param != 0) { dev_err(&pdev->dev, "Failed to set rss indir table\n"); err = -EFAULT; goto free_buf; } indir_tbl->offset = cpu_to_be32(indir_size); indir_tbl->size = cpu_to_be32(indir_size); memcpy(&indir_tbl->entry[0], &indir_tbl->entry[indir_size], indir_size); err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE, &cmd_buf, &out_param); if (err || out_param != 0) { dev_err(&pdev->dev, "Failed to set rss indir table\n"); err = -EFAULT; } free_buf: hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf); return err; } int hinic_rss_get_indir_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx, u32 *indir_table) { struct hinic_rss_indir_table rss_cfg = { 0 }; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(rss_cfg); int err = 0, i; rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); rss_cfg.template_id = tmpl_idx; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, &rss_cfg, sizeof(rss_cfg), &rss_cfg, &out_size); if (err || !out_size || rss_cfg.status) { dev_err(&pdev->dev, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x\n", err, rss_cfg.status, out_size); return -EINVAL; } hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE); for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) indir_table[i] = rss_cfg.indir[i]; return 0; } int hinic_set_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx, struct hinic_rss_type rss_type) { struct hinic_rss_context_tbl *ctx_tbl; struct hinic_func_to_io *func_to_io; struct hinic_cmdq_buf cmd_buf; struct hinic_hwdev *hwdev; struct hinic_hwif *hwif; struct pci_dev *pdev; u64 out_param; u32 ctx = 0; int err; hwdev = nic_dev->hwdev; func_to_io = &hwdev->func_to_io; hwif = hwdev->hwif; pdev = hwif->pdev; err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmd_buf); if (err) { dev_err(&pdev->dev, "Failed to allocate cmd buf\n"); return -ENOMEM; } ctx |= HINIC_RSS_TYPE_SET(1, VALID) | HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | 
HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); cmd_buf.size = sizeof(struct hinic_rss_context_tbl); ctx_tbl = (struct hinic_rss_context_tbl *)cmd_buf.buf; ctx_tbl->group_index = cpu_to_be32(tmpl_idx); ctx_tbl->offset = 0; ctx_tbl->size = sizeof(u32); ctx_tbl->size = cpu_to_be32(ctx_tbl->size); ctx_tbl->rsvd = 0; ctx_tbl->ctx = cpu_to_be32(ctx); /* cfg the rss context table by command queue */ err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, &cmd_buf, &out_param); hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmd_buf); if (err || out_param != 0) { dev_err(&pdev->dev, "Failed to set rss context table, err: %d\n", err); return -EFAULT; } return 0; } int hinic_get_rss_type(struct hinic_dev *nic_dev, u32 tmpl_idx, struct hinic_rss_type *rss_type) { struct hinic_rss_context_table ctx_tbl = { 0 }; struct hinic_hwdev *hwdev = nic_dev->hwdev; u16 out_size = sizeof(ctx_tbl); struct hinic_hwif *hwif; struct pci_dev *pdev; int err; if (!hwdev || !rss_type) return -EINVAL; hwif = hwdev->hwif; pdev = hwif->pdev; ctx_tbl.func_id = HINIC_HWIF_FUNC_IDX(hwif); ctx_tbl.template_id = tmpl_idx; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL, &ctx_tbl, sizeof(ctx_tbl), &ctx_tbl, &out_size); if (err || !out_size || ctx_tbl.status) { dev_err(&pdev->dev, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n", err, ctx_tbl.status, out_size); return -EINVAL; } rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4); rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6); rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); rss_type->tcp_ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6_EXT); rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); return 0; } int hinic_rss_set_template_tbl(struct hinic_dev *nic_dev, u32 template_id, const u8 *temp) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct hinic_rss_key rss_key = { 0 }; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(rss_key); int err; rss_key.func_id = HINIC_HWIF_FUNC_IDX(hwif); rss_key.template_id = template_id; memcpy(rss_key.key, temp, HINIC_RSS_KEY_SIZE); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, &rss_key, sizeof(rss_key), &rss_key, &out_size); if (err || !out_size || rss_key.status) { dev_err(&pdev->dev, "Failed to set rss hash key, err: %d, status: 0x%x, out size: 0x%x\n", err, rss_key.status, out_size); return -EINVAL; } return 0; } int hinic_rss_get_template_tbl(struct hinic_dev *nic_dev, u32 tmpl_idx, u8 *temp) { struct hinic_rss_template_key temp_key = { 0 }; struct hinic_hwdev *hwdev = nic_dev->hwdev; u16 out_size = sizeof(temp_key); struct hinic_hwif *hwif; struct pci_dev *pdev; int err; if (!hwdev || !temp) return -EINVAL; hwif = hwdev->hwif; pdev = hwif->pdev; temp_key.func_id = HINIC_HWIF_FUNC_IDX(hwif); temp_key.template_id = tmpl_idx; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, &temp_key, sizeof(temp_key), &temp_key, &out_size); if (err || !out_size || temp_key.status) { dev_err(&pdev->dev, "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x\n", err, 
temp_key.status, out_size); return -EINVAL; } memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE); return 0; } int hinic_rss_set_hash_engine(struct hinic_dev *nic_dev, u8 template_id, u8 type) { struct hinic_rss_engine_type rss_engine = { 0 }; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(rss_engine); int err; rss_engine.func_id = HINIC_HWIF_FUNC_IDX(hwif); rss_engine.hash_engine = type; rss_engine.template_id = template_id; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, &rss_engine, sizeof(rss_engine), &rss_engine, &out_size); if (err || !out_size || rss_engine.status) { dev_err(&pdev->dev, "Failed to set hash engine, err: %d, status: 0x%x, out size: 0x%x\n", err, rss_engine.status, out_size); return -EINVAL; } return 0; } int hinic_rss_get_hash_engine(struct hinic_dev *nic_dev, u8 tmpl_idx, u8 *type) { struct hinic_rss_engine_type hash_type = { 0 }; struct hinic_hwdev *hwdev = nic_dev->hwdev; u16 out_size = sizeof(hash_type); struct hinic_hwif *hwif; struct pci_dev *pdev; int err; if (!hwdev || !type) return -EINVAL; hwif = hwdev->hwif; pdev = hwif->pdev; hash_type.func_id = HINIC_HWIF_FUNC_IDX(hwif); hash_type.template_id = tmpl_idx; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, &hash_type, sizeof(hash_type), &hash_type, &out_size); if (err || !out_size || hash_type.status) { dev_err(&pdev->dev, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x\n", err, hash_type.status, out_size); return -EINVAL; } *type = hash_type.hash_engine; return 0; } int hinic_rss_cfg(struct hinic_dev *nic_dev, u8 rss_en, u8 template_id) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_rss_config rss_cfg = { 0 }; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; u16 out_size = sizeof(rss_cfg); int err; rss_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); rss_cfg.rss_en = rss_en; rss_cfg.template_id = template_id; rss_cfg.rq_priority_number = 0; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_CFG, &rss_cfg, sizeof(rss_cfg), &rss_cfg, &out_size); if (err || !out_size || rss_cfg.status) { dev_err(&pdev->dev, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n", err, rss_cfg.status, out_size); return -EINVAL; } return 0; } int hinic_rss_template_alloc(struct hinic_dev *nic_dev, u8 *tmpl_idx) { struct hinic_rss_template_mgmt template_mgmt = { 0 }; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; u16 out_size = sizeof(template_mgmt); struct pci_dev *pdev = hwif->pdev; int err; template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif); template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR, &template_mgmt, sizeof(template_mgmt), &template_mgmt, &out_size); if (err || !out_size || template_mgmt.status) { dev_err(&pdev->dev, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x\n", err, template_mgmt.status, out_size); return -EINVAL; } *tmpl_idx = template_mgmt.template_id; return 0; } int hinic_rss_template_free(struct hinic_dev *nic_dev, u8 tmpl_idx) { struct hinic_rss_template_mgmt template_mgmt = { 0 }; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; u16 out_size = sizeof(template_mgmt); struct pci_dev *pdev = hwif->pdev; int err; template_mgmt.func_id = HINIC_HWIF_FUNC_IDX(hwif); template_mgmt.template_id = tmpl_idx; template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE; err = hinic_port_msg_cmd(hwdev, 
HINIC_PORT_CMD_RSS_TEMP_MGR, &template_mgmt, sizeof(template_mgmt), &template_mgmt, &out_size); if (err || !out_size || template_mgmt.status) { dev_err(&pdev->dev, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x\n", err, template_mgmt.status, out_size); return -EINVAL; } return 0; } int hinic_get_vport_stats(struct hinic_dev *nic_dev, struct hinic_vport_stats *stats) { struct hinic_cmd_vport_stats vport_stats = { 0 }; struct hinic_port_stats_info stats_info = { 0 }; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; u16 out_size = sizeof(vport_stats); struct pci_dev *pdev = hwif->pdev; int err; stats_info.stats_version = HINIC_PORT_STATS_VERSION; stats_info.func_id = HINIC_HWIF_FUNC_IDX(hwif); stats_info.stats_size = sizeof(vport_stats); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT, &stats_info, sizeof(stats_info), &vport_stats, &out_size); if (err || !out_size || vport_stats.status) { dev_err(&pdev->dev, "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n", err, vport_stats.status, out_size); return -EFAULT; } memcpy(stats, &vport_stats.stats, sizeof(*stats)); return 0; } int hinic_get_phy_port_stats(struct hinic_dev *nic_dev, struct hinic_phy_port_stats *stats) { struct hinic_port_stats_info stats_info = { 0 }; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct hinic_port_stats *port_stats; u16 out_size = sizeof(*port_stats); struct pci_dev *pdev = hwif->pdev; int err; port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); if (!port_stats) return -ENOMEM; stats_info.stats_version = HINIC_PORT_STATS_VERSION; stats_info.stats_size = sizeof(*port_stats); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS, &stats_info, sizeof(stats_info), port_stats, &out_size); if (err || !out_size || port_stats->status) { dev_err(&pdev->dev, "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n", err, port_stats->status, out_size); err = -EINVAL; goto out; } memcpy(stats, &port_stats->stats, sizeof(*stats)); out: kfree(port_stats); return err; } int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver) { struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_version_info up_ver = {0}; u16 out_size = sizeof(up_ver); struct hinic_hwif *hwif; struct pci_dev *pdev; int err; if (!hwdev) return -EINVAL; hwif = hwdev->hwif; pdev = hwif->pdev; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION, &up_ver, sizeof(up_ver), &up_ver, &out_size); if (err || !out_size || up_ver.status) { dev_err(&pdev->dev, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n", err, up_ver.status, out_size); return -EINVAL; } snprintf(mgmt_ver, HINIC_MGMT_VERSION_MAX_LEN, "%s", up_ver.ver); return 0; } int hinic_get_link_mode(struct hinic_hwdev *hwdev, struct hinic_link_mode_cmd *link_mode) { u16 out_size; int err; if (!hwdev || !link_mode) return -EINVAL; link_mode->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); out_size = sizeof(*link_mode); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_MODE, link_mode, sizeof(*link_mode), link_mode, &out_size); if (err || !out_size || link_mode->status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x\n", err, link_mode->status, out_size); return -EIO; } return 0; } int hinic_set_autoneg(struct hinic_hwdev *hwdev, bool enable) { struct hinic_set_autoneg_cmd autoneg = {0}; u16 out_size = sizeof(autoneg); int err; if (!hwdev) return -EINVAL; 
autoneg.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); autoneg.enable = enable; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_AUTONEG, &autoneg, sizeof(autoneg), &autoneg, &out_size); if (err || !out_size || autoneg.status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to %s autoneg, err: %d, status: 0x%x, out size: 0x%x\n", enable ? "enable" : "disable", err, autoneg.status, out_size); return -EIO; } return 0; } int hinic_set_speed(struct hinic_hwdev *hwdev, enum nic_speed_level speed) { struct hinic_speed_cmd speed_info = {0}; u16 out_size = sizeof(speed_info); int err; if (!hwdev) return -EINVAL; speed_info.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); speed_info.speed = speed; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_SPEED, &speed_info, sizeof(speed_info), &speed_info, &out_size); if (err || !out_size || speed_info.status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set speed, err: %d, status: 0x%x, out size: 0x%x\n", err, speed_info.status, out_size); return -EIO; } return 0; } int hinic_set_link_settings(struct hinic_hwdev *hwdev, struct hinic_link_ksettings_info *info) { u16 out_size = sizeof(*info); int err; err = hinic_hilink_msg_cmd(hwdev, HINIC_HILINK_CMD_SET_LINK_SETTINGS, info, sizeof(*info), info, &out_size); if ((info->status != HINIC_MGMT_CMD_UNSUPPORTED && info->status) || err || !out_size) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n", err, info->status, out_size); return -EFAULT; } return info->status; } int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev, struct hinic_pause_config *pause_info) { u16 out_size = sizeof(*pause_info); int err; pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO, pause_info, sizeof(*pause_info), pause_info, &out_size); if (err || !out_size || pause_info->status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n", err, pause_info->status, out_size); return -EIO; } return 0; } int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev, struct hinic_pause_config *pause_info) { u16 out_size = sizeof(*pause_info); int err; pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO, pause_info, sizeof(*pause_info), pause_info, &out_size); if (err || !out_size || pause_info->status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x\n", err, pause_info->status, out_size); return -EIO; } return 0; } int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap) { struct hinic_nic_cfg *nic_cfg = &hwdev->func_to_io.nic_cfg; struct hinic_set_pfc pfc = {0}; u16 out_size = sizeof(pfc); int err; if (HINIC_IS_VF(hwdev->hwif)) return 0; mutex_lock(&nic_cfg->cfg_mutex); pfc.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); pfc.pfc_bitmap = pfc_bitmap; pfc.pfc_en = pfc_en; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PFC, &pfc, sizeof(pfc), &pfc, &out_size); if (err || pfc.status || !out_size) { dev_err(&hwdev->hwif->pdev->dev, "Failed to %s pfc, err: %d, status: 0x%x, out size: 0x%x\n", pfc_en ? "enable" : "disable", err, pfc.status, out_size); mutex_unlock(&nic_cfg->cfg_mutex); return -EIO; } /* pause settings is opposite from pfc */ nic_cfg->rx_pause = pfc_en ? 0 : 1; nic_cfg->tx_pause = pfc_en ? 
0 : 1; mutex_unlock(&nic_cfg->cfg_mutex); return 0; } int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable) { struct hinic_port_loopback lb = {0}; u16 out_size = sizeof(lb); int err; lb.mode = mode; lb.en = enable; if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) { dev_err(&hwdev->hwif->pdev->dev, "Invalid loopback mode %d to set\n", mode); return -EINVAL; } err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LOOPBACK_MODE, &lb, sizeof(lb), &lb, &out_size); if (err || !out_size || lb.status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set loopback mode %d en %d, err: %d, status: 0x%x, out size: 0x%x\n", mode, enable, err, lb.status, out_size); return -EIO; } return 0; } static int _set_led_status(struct hinic_hwdev *hwdev, u8 port, enum hinic_led_type type, enum hinic_led_mode mode, u8 reset) { struct hinic_led_info led_info = {0}; u16 out_size = sizeof(led_info); struct hinic_pfhwdev *pfhwdev; int err; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); led_info.port = port; led_info.reset = reset; led_info.type = type; led_info.mode = mode; err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, HINIC_COMM_CMD_SET_LED_STATUS, &led_info, sizeof(led_info), &led_info, &out_size, HINIC_MGMT_MSG_SYNC); if (err || led_info.status || !out_size) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n", err, led_info.status, out_size); return -EIO; } return 0; } int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port, enum hinic_led_type type, enum hinic_led_mode mode) { if (!hwdev) return -EINVAL; return _set_led_status(hwdev, port, type, mode, 0); } int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port) { int err; if (!hwdev) return -EINVAL; err = _set_led_status(hwdev, port, HINIC_LED_TYPE_INVALID, HINIC_LED_MODE_INVALID, 1); if (err) dev_err(&hwdev->hwif->pdev->dev, "Failed to reset led status\n"); return err; } static bool hinic_if_sfp_absent(struct hinic_hwdev *hwdev) { struct hinic_cmd_get_light_module_abs sfp_abs = {0}; u16 out_size = sizeof(sfp_abs); u8 port_id = hwdev->port_id; int err; sfp_abs.port_id = port_id; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_SFP_ABS, &sfp_abs, sizeof(sfp_abs), &sfp_abs, &out_size); if (sfp_abs.status || err || !out_size) { dev_err(&hwdev->hwif->pdev->dev, "Failed to get port%d sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n", port_id, err, sfp_abs.status, out_size); return true; } return ((sfp_abs.abs_status == 0) ? 
false : true); } int hinic_get_sfp_eeprom(struct hinic_hwdev *hwdev, u8 *data, u16 *len) { struct hinic_cmd_get_std_sfp_info sfp_info = {0}; u16 out_size = sizeof(sfp_info); u8 port_id; int err; if (!hwdev || !data || !len) return -EINVAL; port_id = hwdev->port_id; if (hinic_if_sfp_absent(hwdev)) return -ENXIO; sfp_info.port_id = port_id; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_STD_SFP_INFO, &sfp_info, sizeof(sfp_info), &sfp_info, &out_size); if (sfp_info.status || err || !out_size) { dev_err(&hwdev->hwif->pdev->dev, "Failed to get port%d sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n", port_id, err, sfp_info.status, out_size); return -EIO; } *len = min_t(u16, sfp_info.eeprom_len, STD_SFP_INFO_MAX_SIZE); memcpy(data, sfp_info.sfp_info, STD_SFP_INFO_MAX_SIZE); return 0; } int hinic_get_sfp_type(struct hinic_hwdev *hwdev, u8 *data0, u8 *data1) { u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; u16 len; int err; if (hinic_if_sfp_absent(hwdev)) return -ENXIO; err = hinic_get_sfp_eeprom(hwdev, sfp_data, &len); if (err) return err; *data0 = sfp_data[0]; *data1 = sfp_data[1]; return 0; }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_port.c
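Nearly every helper in hinic_port.c above follows the same shape: fill a command struct with the function index, send it with hinic_port_msg_cmd() reusing the same buffer for request and response, then reject the result unless the transport succeeded, the returned size matches the struct, and the firmware status field is zero. The stand-alone sketch below reproduces that validation pattern with a mocked transport; the struct layout, status values, and mock_msg_cmd() are invented for illustration and do not match the real firmware ABI.

/*
 * Sketch of the command/response validation pattern used throughout
 * hinic_port.c.  mock_msg_cmd() stands in for hinic_port_msg_cmd().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mock_port_cmd {
	uint8_t  status;	/* 0 on success, firmware error code otherwise */
	uint16_t func_idx;	/* which PCI function issued the command */
	uint32_t value;		/* payload filled in by the "firmware" */
};

/* Pretend management channel: echoes the request back with a result.
 * memmove() is used because the caller may pass the same buffer for the
 * request and the response, as the real driver does. */
static int mock_msg_cmd(const void *in, uint16_t in_size,
			void *out, uint16_t *out_size)
{
	struct mock_port_cmd *resp = out;

	if (in_size != sizeof(struct mock_port_cmd))
		return -1;

	memmove(out, in, in_size);
	resp->status = 0;
	resp->value = 1500;		/* e.g. a current MTU */
	*out_size = in_size;
	return 0;
}

static int mock_port_get_value(uint16_t func_idx, uint32_t *value)
{
	struct mock_port_cmd cmd = { 0 };
	uint16_t out_size = sizeof(cmd);
	int err;

	cmd.func_idx = func_idx;

	err = mock_msg_cmd(&cmd, sizeof(cmd), &cmd, &out_size);

	/* Same three-way check as the driver: transport error, wrong reply
	 * size, or a non-zero firmware status all fail the call. */
	if (err || out_size != sizeof(cmd) || cmd.status) {
		fprintf(stderr,
			"cmd failed, err: %d, status: 0x%x, out size: 0x%x\n",
			err, cmd.status, out_size);
		return -1;
	}

	*value = cmd.value;
	return 0;
}

int main(void)
{
	uint32_t value;

	if (mock_port_get_value(0, &value))
		return 1;

	printf("value = %u\n", value);
	return 0;
}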
// SPDX-License-Identifier: GPL-2.0 /* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <linux/ethtool.h> #include <linux/vmalloc.h> #include <linux/sfp.h> #include "hinic_hw_qp.h" #include "hinic_hw_dev.h" #include "hinic_port.h" #include "hinic_tx.h" #include "hinic_rx.h" #include "hinic_dev.h" #define SET_LINK_STR_MAX_LEN 16 #define GET_SUPPORTED_MODE 0 #define GET_ADVERTISED_MODE 1 #define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \ ((ecmd)->supported |= \ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit)) #define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \ ((ecmd)->advertising |= \ (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit)) #define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \ ((ecmd)->supported |= SUPPORTED_##mode) #define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ ((ecmd)->advertising |= ADVERTISED_##mode) #define COALESCE_PENDING_LIMIT_UNIT 8 #define COALESCE_TIMER_CFG_UNIT 9 #define COALESCE_ALL_QUEUE 0xFFFF #define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT) #define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT) struct hw2ethtool_link_mode { enum ethtool_link_mode_bit_indices link_mode_bit; u32 speed; enum hinic_link_mode hw_link_mode; }; struct cmd_link_settings { u64 supported; u64 advertising; u32 speed; u8 duplex; u8 port; u8 autoneg; }; static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = { SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, SPEED_25000, SPEED_40000, SPEED_100000 }; static struct hw2ethtool_link_mode hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = { { .link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, .speed = SPEED_10000, .hw_link_mode = HINIC_10GE_BASE_KR, }, { .link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, .speed = SPEED_40000, .hw_link_mode = HINIC_40GE_BASE_KR4, }, { .link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, .speed = SPEED_40000, .hw_link_mode = HINIC_40GE_BASE_CR4, }, { .link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, .speed = SPEED_100000, .hw_link_mode = HINIC_100GE_BASE_KR4, }, { .link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, .speed = SPEED_100000, .hw_link_mode = HINIC_100GE_BASE_CR4, }, { .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, .speed = SPEED_25000, .hw_link_mode = HINIC_25GE_BASE_KR_S, }, { .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, .speed = SPEED_25000, .hw_link_mode = HINIC_25GE_BASE_CR_S, }, { .link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, .speed = SPEED_25000, .hw_link_mode = HINIC_25GE_BASE_KR, }, { .link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, .speed = SPEED_25000, .hw_link_mode = HINIC_25GE_BASE_CR, }, { .link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, .speed = SPEED_1000, .hw_link_mode = HINIC_GE_BASE_KX, 
}, }; #define LP_DEFAULT_TIME 5 /* seconds */ #define LP_PKT_LEN 1514 #define PORT_DOWN_ERR_IDX 0 enum diag_test_index { INTERNAL_LP_TEST = 0, EXTERNAL_LP_TEST = 1, DIAG_TEST_MAX = 2, }; static void set_link_speed(struct ethtool_link_ksettings *link_ksettings, enum hinic_speed speed) { switch (speed) { case HINIC_SPEED_10MB_LINK: link_ksettings->base.speed = SPEED_10; break; case HINIC_SPEED_100MB_LINK: link_ksettings->base.speed = SPEED_100; break; case HINIC_SPEED_1000MB_LINK: link_ksettings->base.speed = SPEED_1000; break; case HINIC_SPEED_10GB_LINK: link_ksettings->base.speed = SPEED_10000; break; case HINIC_SPEED_25GB_LINK: link_ksettings->base.speed = SPEED_25000; break; case HINIC_SPEED_40GB_LINK: link_ksettings->base.speed = SPEED_40000; break; case HINIC_SPEED_100GB_LINK: link_ksettings->base.speed = SPEED_100000; break; default: link_ksettings->base.speed = SPEED_UNKNOWN; break; } } static int hinic_get_link_mode_index(enum hinic_link_mode link_mode) { int i = 0; for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) { if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode) break; } return i; } static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings, enum hinic_link_mode hw_link_mode, u32 name) { enum hinic_link_mode link_mode; int idx = 0; for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) { if (hw_link_mode & ((u32)1 << link_mode)) { idx = hinic_get_link_mode_index(link_mode); if (idx >= HINIC_LINK_MODE_NUMBERS) continue; if (name == GET_SUPPORTED_MODE) ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE (link_settings, idx); else ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE (link_settings, idx); } } } static void hinic_link_port_type(struct cmd_link_settings *link_settings, enum hinic_port_type port_type) { switch (port_type) { case HINIC_PORT_ELEC: case HINIC_PORT_TP: ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP); ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP); link_settings->port = PORT_TP; break; case HINIC_PORT_AOC: case HINIC_PORT_FIBRE: ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); link_settings->port = PORT_FIBRE; break; case HINIC_PORT_COPPER: ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); link_settings->port = PORT_DA; break; case HINIC_PORT_BACKPLANE: ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane); ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane); link_settings->port = PORT_NONE; break; default: link_settings->port = PORT_OTHER; break; } } static int hinic_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *link_ksettings) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_link_mode_cmd link_mode = { 0 }; struct hinic_pause_config pause_info = { 0 }; struct cmd_link_settings settings = { 0 }; enum hinic_port_link_state link_state; struct hinic_port_cap port_cap; int err; ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); link_ksettings->base.speed = SPEED_UNKNOWN; link_ksettings->base.autoneg = AUTONEG_DISABLE; link_ksettings->base.duplex = DUPLEX_UNKNOWN; err = hinic_port_get_cap(nic_dev, &port_cap); if (err) return err; hinic_link_port_type(&settings, port_cap.port_type); link_ksettings->base.port = settings.port; err = hinic_port_link_state(nic_dev, &link_state); if (err) return err; if (link_state == HINIC_LINK_STATE_UP) { set_link_speed(link_ksettings, 
port_cap.speed); link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ? DUPLEX_FULL : DUPLEX_HALF; } if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED)) ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Autoneg); if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE) link_ksettings->base.autoneg = AUTONEG_ENABLE; err = hinic_get_link_mode(nic_dev->hwdev, &link_mode); if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN || link_mode.advertised == HINIC_SUPPORTED_UNKNOWN) return -EIO; hinic_add_ethtool_link_mode(&settings, link_mode.supported, GET_SUPPORTED_MODE); hinic_add_ethtool_link_mode(&settings, link_mode.advertised, GET_ADVERTISED_MODE); if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) { err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info); if (err) return err; ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause); if (pause_info.rx_pause && pause_info.tx_pause) { ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause); } else if (pause_info.tx_pause) { ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause); } else if (pause_info.rx_pause) { ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause); ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause); } } linkmode_copy(link_ksettings->link_modes.supported, (unsigned long *)&settings.supported); linkmode_copy(link_ksettings->link_modes.advertising, (unsigned long *)&settings.advertising); return 0; } static int hinic_ethtool_to_hw_speed_level(u32 speed) { int i; for (i = 0; i < LINK_SPEED_LEVELS; i++) { if (hw_to_ethtool_speed[i] == speed) break; } return i; } static bool hinic_is_support_speed(enum hinic_link_mode supported_link, u32 speed) { enum hinic_link_mode link_mode; int idx; for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) { if (!(supported_link & ((u32)1 << link_mode))) continue; idx = hinic_get_link_mode_index(link_mode); if (idx >= HINIC_LINK_MODE_NUMBERS) continue; if (hw_to_ethtool_link_mode_table[idx].speed == speed) return true; } return false; } static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed) { struct hinic_link_mode_cmd link_mode = { 0 }; struct net_device *netdev = nic_dev->netdev; enum nic_speed_level speed_level = 0; int err; err = hinic_get_link_mode(nic_dev->hwdev, &link_mode); if (err) return false; if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN || link_mode.advertised == HINIC_SUPPORTED_UNKNOWN) return false; speed_level = hinic_ethtool_to_hw_speed_level(speed); if (speed_level >= LINK_SPEED_LEVELS || !hinic_is_support_speed(link_mode.supported, speed)) { netif_err(nic_dev, drv, netdev, "Unsupported speed: %d\n", speed); return false; } return true; } static int get_link_settings_type(struct hinic_dev *nic_dev, u8 autoneg, u32 speed, u32 *set_settings) { struct hinic_port_cap port_cap = { 0 }; int err; err = hinic_port_get_cap(nic_dev, &port_cap); if (err) return err; /* always set autonegotiation */ if (port_cap.autoneg_cap) *set_settings |= HILINK_LINK_SET_AUTONEG; if (autoneg == AUTONEG_ENABLE) { if (!port_cap.autoneg_cap) { netif_err(nic_dev, drv, nic_dev->netdev, "Not support autoneg\n"); return -EOPNOTSUPP; } } else if (speed != (u32)SPEED_UNKNOWN) { /* set speed only when autoneg is disabled */ if (!hinic_is_speed_legal(nic_dev, speed)) return -EINVAL; *set_settings |= HILINK_LINK_SET_SPEED; } else { netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n"); return -EOPNOTSUPP; } return 0; } static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev, u32 set_settings, u8 autoneg, u32 speed) { enum 
nic_speed_level speed_level = 0;
	int err = 0;

	if (set_settings & HILINK_LINK_SET_AUTONEG) {
		err = hinic_set_autoneg(nic_dev->hwdev,
					(autoneg == AUTONEG_ENABLE));
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev,
				  "%s autoneg failed\n",
				  (autoneg == AUTONEG_ENABLE) ?
				  "Enable" : "Disable");
		else
			netif_info(nic_dev, drv, nic_dev->netdev,
				   "%s autoneg successfully\n",
				   (autoneg == AUTONEG_ENABLE) ?
				   "Enable" : "Disable");
	}

	if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
		speed_level = hinic_ethtool_to_hw_speed_level(speed);
		err = hinic_set_speed(nic_dev->hwdev, speed_level);
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev,
				  "Set speed %d failed\n", speed);
		else
			netif_info(nic_dev, drv, nic_dev->netdev,
				   "Set speed %d successfully\n", speed);
	}

	return err;
}

static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
				    u32 set_settings, u8 autoneg, u32 speed)
{
	struct hinic_link_ksettings_info settings = {0};
	char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
	const char *autoneg_str;
	struct net_device *netdev = nic_dev->netdev;
	enum nic_speed_level speed_level = 0;
	int err;

	autoneg_str = (set_settings & HILINK_LINK_SET_AUTONEG) ?
		      (autoneg ? "autoneg enable " : "autoneg disable ") : "";

	if (set_settings & HILINK_LINK_SET_SPEED) {
		speed_level = hinic_ethtool_to_hw_speed_level(speed);
		err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN,
			       "speed %d ", speed);
		if (err >= SET_LINK_STR_MAX_LEN) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
				  err, SET_LINK_STR_MAX_LEN);
			return -EFAULT;
		}
	}

	settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
	settings.valid_bitmap = set_settings;
	settings.autoneg = autoneg;
	settings.speed = speed_level;

	err = hinic_set_link_settings(nic_dev->hwdev, &settings);
	if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
		if (err)
			netif_err(nic_dev, drv, netdev, "Set %s%sfailed\n",
				  autoneg_str, set_link_str);
		else
			netif_info(nic_dev, drv, netdev, "Set %s%ssuccessfully\n",
				   autoneg_str, set_link_str);
		return err;
	}

	return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
					      speed);
}

static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u32 set_settings = 0;
	int err;

	err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
	if (err)
		return err;

	if (set_settings)
		err = hinic_set_settings_to_hw(nic_dev, set_settings,
					       autoneg, speed);
	else
		netif_info(nic_dev, drv, netdev,
			   "Nothing changed, exit without setting anything\n");

	return err;
}

static int hinic_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *link_settings)
{
	/* only autoneg and speed can be set */
	return set_link_settings(netdev, link_settings->base.autoneg,
				 link_settings->base.speed);
}

static void hinic_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *info)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	int err;

	strscpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));

	err = hinic_get_mgmt_version(nic_dev, mgmt_ver);
	if (err)
		return;

	snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver);
}

static void hinic_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
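	/* Report the device's fixed queue-depth limit and the SQ (Tx) /
	 * RQ (Rx) depths currently configured for this interface.
	 */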
ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH; ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH; ring->rx_pending = nic_dev->rq_depth; ring->tx_pending = nic_dev->sq_depth; } static int check_ringparam_valid(struct hinic_dev *nic_dev, struct ethtool_ringparam *ring) { if (ring->rx_jumbo_pending || ring->rx_mini_pending) { netif_err(nic_dev, drv, nic_dev->netdev, "Unsupported rx_jumbo_pending/rx_mini_pending\n"); return -EINVAL; } if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH || ring->tx_pending < HINIC_MIN_QUEUE_DEPTH || ring->rx_pending > HINIC_MAX_QUEUE_DEPTH || ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) { netif_err(nic_dev, drv, nic_dev->netdev, "Queue depth out of range [%d-%d]\n", HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH); return -EINVAL; } return 0; } static int hinic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct hinic_dev *nic_dev = netdev_priv(netdev); u16 new_sq_depth, new_rq_depth; int err; err = check_ringparam_valid(nic_dev, ring); if (err) return err; new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending)); new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending)); if (new_sq_depth == nic_dev->sq_depth && new_rq_depth == nic_dev->rq_depth) return 0; netif_info(nic_dev, drv, netdev, "Change Tx/Rx ring depth from %d/%d to %d/%d\n", nic_dev->sq_depth, nic_dev->rq_depth, new_sq_depth, new_rq_depth); nic_dev->sq_depth = new_sq_depth; nic_dev->rq_depth = new_rq_depth; if (netif_running(netdev)) { netif_info(nic_dev, drv, netdev, "Restarting netdev\n"); err = hinic_close(netdev); if (err) { netif_err(nic_dev, drv, netdev, "Failed to close netdev\n"); return -EFAULT; } err = hinic_open(netdev); if (err) { netif_err(nic_dev, drv, netdev, "Failed to open netdev\n"); return -EFAULT; } } return 0; } static int __hinic_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_intr_coal_info *rx_intr_coal_info; struct hinic_intr_coal_info *tx_intr_coal_info; if (queue == COALESCE_ALL_QUEUE) { /* get tx/rx irq0 as default parameters */ rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0]; tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0]; } else { if (queue >= nic_dev->num_qps) { netif_err(nic_dev, drv, netdev, "Invalid queue_id: %d\n", queue); return -EINVAL; } rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue]; tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue]; } /* coalesce_timer is in unit of 9us */ coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg * COALESCE_TIMER_CFG_UNIT; /* coalesced_frames is in unit of 8 */ coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt * COALESCE_PENDING_LIMIT_UNIT; coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg * COALESCE_TIMER_CFG_UNIT; coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt * COALESCE_PENDING_LIMIT_UNIT; return 0; } static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal) { if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG || coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT || coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG || coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) return -ERANGE; return 0; } static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id, struct hinic_intr_coal_info *coal, bool set_rx_coal) { struct hinic_intr_coal_info *intr_coal = NULL; struct hinic_msix_config interrupt_info = {0}; struct net_device *netdev = 
nic_dev->netdev; u16 msix_idx; int err; intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] : &nic_dev->tx_intr_coalesce[q_id]; intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg; intr_coal->pending_limt = coal->pending_limt; /* netdev not running or qp not in using, * don't need to set coalesce to hw */ if (!(nic_dev->flags & HINIC_INTF_UP) || q_id >= nic_dev->num_qps) return 0; msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry : nic_dev->txqs[q_id].sq->msix_entry; interrupt_info.msix_index = msix_idx; interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg; interrupt_info.pending_cnt = intr_coal->pending_limt; interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg; err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info); if (err) netif_warn(nic_dev, drv, netdev, "Failed to set %s queue%d coalesce", set_rx_coal ? "rx" : "tx", q_id); return err; } static int __set_hw_coal_param(struct hinic_dev *nic_dev, struct hinic_intr_coal_info *intr_coal, u16 queue, bool set_rx_coal) { int err; u16 i; if (queue == COALESCE_ALL_QUEUE) { for (i = 0; i < nic_dev->max_qps; i++) { err = set_queue_coalesce(nic_dev, i, intr_coal, set_rx_coal); if (err) return err; } } else { if (queue >= nic_dev->num_qps) { netif_err(nic_dev, drv, nic_dev->netdev, "Invalid queue_id: %d\n", queue); return -EINVAL; } err = set_queue_coalesce(nic_dev, queue, intr_coal, set_rx_coal); if (err) return err; } return 0; } static int __hinic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_intr_coal_info rx_intr_coal = {0}; struct hinic_intr_coal_info tx_intr_coal = {0}; bool set_rx_coal = false; bool set_tx_coal = false; int err; err = is_coalesce_exceed_limit(coal); if (err) return err; if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) { rx_intr_coal.coalesce_timer_cfg = (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames / COALESCE_PENDING_LIMIT_UNIT); set_rx_coal = true; } if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) { tx_intr_coal.coalesce_timer_cfg = (u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames / COALESCE_PENDING_LIMIT_UNIT); set_tx_coal = true; } /* setting coalesce timer or pending limit to zero will disable * coalesce */ if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg || !rx_intr_coal.pending_limt)) netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n"); if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg || !tx_intr_coal.pending_limt)) netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n"); if (set_rx_coal) { err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true); if (err) return err; } if (set_tx_coal) { err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false); if (err) return err; } return 0; } static int hinic_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); } static int hinic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); } static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, struct ethtool_coalesce *coal) { return 
__hinic_get_coalesce(netdev, coal, queue); } static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, struct ethtool_coalesce *coal) { return __hinic_set_coalesce(netdev, coal, queue); } static void hinic_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_pause_config pause_info = {0}; struct hinic_nic_cfg *nic_cfg; int err; nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg; err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info); if (!err) { pause->autoneg = pause_info.auto_neg; if (nic_cfg->pause_set || !pause_info.auto_neg) { pause->rx_pause = nic_cfg->rx_pause; pause->tx_pause = nic_cfg->tx_pause; } else { pause->rx_pause = pause_info.rx_pause; pause->tx_pause = pause_info.tx_pause; } } } static int hinic_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_pause_config pause_info = {0}; struct hinic_port_cap port_cap = {0}; int err; err = hinic_port_get_cap(nic_dev, &port_cap); if (err) return -EIO; if (pause->autoneg != port_cap.autoneg_state) return -EOPNOTSUPP; pause_info.auto_neg = pause->autoneg; pause_info.rx_pause = pause->rx_pause; pause_info.tx_pause = pause->tx_pause; mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex); err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info); if (err) { mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex); return err; } nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true; nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg; nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause; nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause; mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex); return 0; } static void hinic_get_channels(struct net_device *netdev, struct ethtool_channels *channels) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; channels->max_combined = nic_dev->max_qps; channels->combined_count = hinic_hwdev_num_qps(hwdev); } static int hinic_set_channels(struct net_device *netdev, struct ethtool_channels *channels) { struct hinic_dev *nic_dev = netdev_priv(netdev); unsigned int count = channels->combined_count; int err; netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n", hinic_hwdev_num_qps(nic_dev->hwdev), count); if (netif_running(netdev)) { netif_info(nic_dev, drv, netdev, "Restarting netdev\n"); hinic_close(netdev); nic_dev->hwdev->nic_cap.num_qps = count; err = hinic_open(netdev); if (err) { netif_err(nic_dev, drv, netdev, "Failed to open netdev\n"); return -EFAULT; } } else { nic_dev->hwdev->nic_cap.num_qps = count; } return 0; } static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev, struct ethtool_rxnfc *cmd) { struct hinic_rss_type rss_type = { 0 }; int err; cmd->data = 0; if (!(nic_dev->flags & HINIC_RSS_ENABLE)) return 0; err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx, &rss_type); if (err) return err; cmd->data = RXH_IP_SRC | RXH_IP_DST; switch (cmd->flow_type) { case TCP_V4_FLOW: if (rss_type.tcp_ipv4) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case TCP_V6_FLOW: if (rss_type.tcp_ipv6) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: if (rss_type.udp_ipv4) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: if (rss_type.udp_ipv6) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case IPV4_FLOW: case IPV6_FLOW: break; default: cmd->data = 0; return -EINVAL; 
} return 0; } static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd, struct hinic_rss_type *rss_type) { u8 rss_l4_en = 0; switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: rss_l4_en = 0; break; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): rss_l4_en = 1; break; default: return -EINVAL; } switch (cmd->flow_type) { case TCP_V4_FLOW: rss_type->tcp_ipv4 = rss_l4_en; break; case TCP_V6_FLOW: rss_type->tcp_ipv6 = rss_l4_en; break; case UDP_V4_FLOW: rss_type->udp_ipv4 = rss_l4_en; break; case UDP_V6_FLOW: rss_type->udp_ipv6 = rss_l4_en; break; default: return -EINVAL; } return 0; } static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev, struct ethtool_rxnfc *cmd) { struct hinic_rss_type *rss_type = &nic_dev->rss_type; int err; if (!(nic_dev->flags & HINIC_RSS_ENABLE)) { cmd->data = 0; return -EOPNOTSUPP; } /* RSS does not support anything other than hashing * to queues on src and dst IPs and ports */ if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) return -EINVAL; /* We need at least the IP SRC and DEST fields for hashing */ if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST)) return -EINVAL; err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx, rss_type); if (err) return -EFAULT; switch (cmd->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: case UDP_V4_FLOW: case UDP_V6_FLOW: err = set_l4_rss_hash_ops(cmd, rss_type); if (err) return err; break; case IPV4_FLOW: rss_type->ipv4 = 1; break; case IPV6_FLOW: rss_type->ipv6 = 1; break; default: return -EINVAL; } err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx, *rss_type); if (err) return -EFAULT; return 0; } static int __set_rss_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) { struct hinic_dev *nic_dev = netdev_priv(netdev); int err; if (indir) { if (!nic_dev->rss_indir_user) { nic_dev->rss_indir_user = kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE, GFP_KERNEL); if (!nic_dev->rss_indir_user) return -ENOMEM; } memcpy(nic_dev->rss_indir_user, indir, sizeof(u32) * HINIC_RSS_INDIR_SIZE); err = hinic_rss_set_indir_tbl(nic_dev, nic_dev->rss_tmpl_idx, indir); if (err) return -EFAULT; } if (key) { if (!nic_dev->rss_hkey_user) { nic_dev->rss_hkey_user = kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL); if (!nic_dev->rss_hkey_user) return -ENOMEM; } memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE); err = hinic_rss_set_template_tbl(nic_dev, nic_dev->rss_tmpl_idx, key); if (err) return -EFAULT; } return 0; } static int hinic_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct hinic_dev *nic_dev = netdev_priv(netdev); int err = 0; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = nic_dev->num_qps; break; case ETHTOOL_GRXFH: err = hinic_get_rss_hash_opts(nic_dev, cmd); break; default: err = -EOPNOTSUPP; break; } return err; } static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct hinic_dev *nic_dev = netdev_priv(netdev); int err = 0; switch (cmd->cmd) { case ETHTOOL_SRXFH: err = hinic_set_rss_hash_opts(nic_dev, cmd); break; default: err = -EOPNOTSUPP; break; } return err; } static int hinic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct hinic_dev *nic_dev = netdev_priv(netdev); u8 hash_engine_type = 0; int err = 0; if (!(nic_dev->flags & HINIC_RSS_ENABLE)) return -EOPNOTSUPP; if (hfunc) { err = hinic_rss_get_hash_engine(nic_dev, nic_dev->rss_tmpl_idx, &hash_engine_type); if (err) return -EFAULT; *hfunc = hash_engine_type ? 
ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; } if (indir) { err = hinic_rss_get_indir_tbl(nic_dev, nic_dev->rss_tmpl_idx, indir); if (err) return -EFAULT; } if (key) err = hinic_rss_get_template_tbl(nic_dev, nic_dev->rss_tmpl_idx, key); return err; } static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct hinic_dev *nic_dev = netdev_priv(netdev); int err = 0; if (!(nic_dev->flags & HINIC_RSS_ENABLE)) return -EOPNOTSUPP; if (hfunc != ETH_RSS_HASH_NO_CHANGE) { if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) return -EOPNOTSUPP; nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? HINIC_RSS_HASH_ENGINE_TYPE_XOR : HINIC_RSS_HASH_ENGINE_TYPE_TOEP; err = hinic_rss_set_hash_engine (nic_dev, nic_dev->rss_tmpl_idx, nic_dev->rss_hash_engine); if (err) return -EFAULT; } err = __set_rss_rxfh(netdev, indir, key); return err; } static u32 hinic_get_rxfh_key_size(struct net_device *netdev) { return HINIC_RSS_KEY_SIZE; } static u32 hinic_get_rxfh_indir_size(struct net_device *netdev) { return HINIC_RSS_INDIR_SIZE; } #define HINIC_FUNC_STAT(_stat_item) { \ .name = #_stat_item, \ .size = sizeof_field(struct hinic_vport_stats, _stat_item), \ .offset = offsetof(struct hinic_vport_stats, _stat_item) \ } static struct hinic_stats hinic_function_stats[] = { HINIC_FUNC_STAT(tx_unicast_pkts_vport), HINIC_FUNC_STAT(tx_unicast_bytes_vport), HINIC_FUNC_STAT(tx_multicast_pkts_vport), HINIC_FUNC_STAT(tx_multicast_bytes_vport), HINIC_FUNC_STAT(tx_broadcast_pkts_vport), HINIC_FUNC_STAT(tx_broadcast_bytes_vport), HINIC_FUNC_STAT(rx_unicast_pkts_vport), HINIC_FUNC_STAT(rx_unicast_bytes_vport), HINIC_FUNC_STAT(rx_multicast_pkts_vport), HINIC_FUNC_STAT(rx_multicast_bytes_vport), HINIC_FUNC_STAT(rx_broadcast_pkts_vport), HINIC_FUNC_STAT(rx_broadcast_bytes_vport), HINIC_FUNC_STAT(tx_discard_vport), HINIC_FUNC_STAT(rx_discard_vport), HINIC_FUNC_STAT(tx_err_vport), HINIC_FUNC_STAT(rx_err_vport), }; static char hinic_test_strings[][ETH_GSTRING_LEN] = { "Internal lb test (on/offline)", "External lb test (external_lb)", }; #define HINIC_PORT_STAT(_stat_item) { \ .name = #_stat_item, \ .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \ .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \ } static struct hinic_stats hinic_port_stats[] = { HINIC_PORT_STAT(mac_rx_total_pkt_num), HINIC_PORT_STAT(mac_rx_total_oct_num), HINIC_PORT_STAT(mac_rx_bad_pkt_num), HINIC_PORT_STAT(mac_rx_bad_oct_num), HINIC_PORT_STAT(mac_rx_good_pkt_num), HINIC_PORT_STAT(mac_rx_good_oct_num), HINIC_PORT_STAT(mac_rx_uni_pkt_num), HINIC_PORT_STAT(mac_rx_multi_pkt_num), HINIC_PORT_STAT(mac_rx_broad_pkt_num), HINIC_PORT_STAT(mac_tx_total_pkt_num), HINIC_PORT_STAT(mac_tx_total_oct_num), HINIC_PORT_STAT(mac_tx_bad_pkt_num), HINIC_PORT_STAT(mac_tx_bad_oct_num), HINIC_PORT_STAT(mac_tx_good_pkt_num), HINIC_PORT_STAT(mac_tx_good_oct_num), HINIC_PORT_STAT(mac_tx_uni_pkt_num), HINIC_PORT_STAT(mac_tx_multi_pkt_num), HINIC_PORT_STAT(mac_tx_broad_pkt_num), HINIC_PORT_STAT(mac_rx_fragment_pkt_num), HINIC_PORT_STAT(mac_rx_undersize_pkt_num), HINIC_PORT_STAT(mac_rx_undermin_pkt_num), HINIC_PORT_STAT(mac_rx_64_oct_pkt_num), HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num), HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num), HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num), HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num), HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num), HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num), HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num), HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num), 
HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num), HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num), HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num), HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num), HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num), HINIC_PORT_STAT(mac_rx_oversize_pkt_num), HINIC_PORT_STAT(mac_rx_jabber_pkt_num), HINIC_PORT_STAT(mac_rx_pause_num), HINIC_PORT_STAT(mac_rx_pfc_pkt_num), HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num), HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num), HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num), HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num), HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num), HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num), HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num), HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num), HINIC_PORT_STAT(mac_rx_control_pkt_num), HINIC_PORT_STAT(mac_rx_sym_err_pkt_num), HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num), HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num), HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num), HINIC_PORT_STAT(mac_tx_fragment_pkt_num), HINIC_PORT_STAT(mac_tx_undersize_pkt_num), HINIC_PORT_STAT(mac_tx_undermin_pkt_num), HINIC_PORT_STAT(mac_tx_64_oct_pkt_num), HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num), HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num), HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num), HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num), HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num), HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num), HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num), HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num), HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num), HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num), HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num), HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num), HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num), HINIC_PORT_STAT(mac_tx_oversize_pkt_num), HINIC_PORT_STAT(mac_tx_jabber_pkt_num), HINIC_PORT_STAT(mac_tx_pause_num), HINIC_PORT_STAT(mac_tx_pfc_pkt_num), HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num), HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num), HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num), HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num), HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num), HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num), HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num), HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num), HINIC_PORT_STAT(mac_tx_control_pkt_num), HINIC_PORT_STAT(mac_tx_err_all_pkt_num), HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num), HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num), }; #define HINIC_TXQ_STAT(_stat_item) { \ .name = "txq%d_"#_stat_item, \ .size = sizeof_field(struct hinic_txq_stats, _stat_item), \ .offset = offsetof(struct hinic_txq_stats, _stat_item) \ } static struct hinic_stats hinic_tx_queue_stats[] = { HINIC_TXQ_STAT(pkts), HINIC_TXQ_STAT(bytes), HINIC_TXQ_STAT(tx_busy), HINIC_TXQ_STAT(tx_wake), HINIC_TXQ_STAT(tx_dropped), HINIC_TXQ_STAT(big_frags_pkts), }; #define HINIC_RXQ_STAT(_stat_item) { \ .name = "rxq%d_"#_stat_item, \ .size = sizeof_field(struct hinic_rxq_stats, _stat_item), \ .offset = offsetof(struct hinic_rxq_stats, _stat_item) \ } static struct hinic_stats hinic_rx_queue_stats[] = { HINIC_RXQ_STAT(pkts), HINIC_RXQ_STAT(bytes), HINIC_RXQ_STAT(errors), HINIC_RXQ_STAT(csum_errors), HINIC_RXQ_STAT(other_errors), }; static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data) { struct hinic_txq_stats txq_stats; struct hinic_rxq_stats rxq_stats; u16 i = 0, j = 0, qid = 0; char *p; for (qid = 0; qid < nic_dev->num_qps; qid++) { if (!nic_dev->txqs) break; hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats); for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) { p = (char *)&txq_stats + 
hinic_tx_queue_stats[j].offset; data[i] = (hinic_tx_queue_stats[j].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } for (qid = 0; qid < nic_dev->num_qps; qid++) { if (!nic_dev->rxqs) break; hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) { p = (char *)&rxq_stats + hinic_rx_queue_stats[j].offset; data[i] = (hinic_rx_queue_stats[j].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } } } static void hinic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_vport_stats vport_stats = {0}; struct hinic_phy_port_stats *port_stats; u16 i = 0, j = 0; char *p; int err; err = hinic_get_vport_stats(nic_dev, &vport_stats); if (err) netif_err(nic_dev, drv, netdev, "Failed to get vport stats from firmware\n"); for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) { p = (char *)&vport_stats + hinic_function_stats[j].offset; data[i] = (hinic_function_stats[j].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); if (!port_stats) { memset(&data[i], 0, ARRAY_SIZE(hinic_port_stats) * sizeof(*data)); i += ARRAY_SIZE(hinic_port_stats); goto get_drv_stats; } err = hinic_get_phy_port_stats(nic_dev, port_stats); if (err) netif_err(nic_dev, drv, netdev, "Failed to get port stats from firmware\n"); for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) { p = (char *)port_stats + hinic_port_stats[j].offset; data[i] = (hinic_port_stats[j].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } kfree(port_stats); get_drv_stats: get_drv_queue_stats(nic_dev, data + i); } static int hinic_get_sset_count(struct net_device *netdev, int sset) { struct hinic_dev *nic_dev = netdev_priv(netdev); int count, q_num; switch (sset) { case ETH_SS_TEST: return ARRAY_SIZE(hinic_test_strings); case ETH_SS_STATS: q_num = nic_dev->num_qps; count = ARRAY_SIZE(hinic_function_stats) + (ARRAY_SIZE(hinic_tx_queue_stats) + ARRAY_SIZE(hinic_rx_queue_stats)) * q_num; count += ARRAY_SIZE(hinic_port_stats); return count; default: return -EOPNOTSUPP; } } static void hinic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct hinic_dev *nic_dev = netdev_priv(netdev); char *p = (char *)data; u16 i, j; switch (stringset) { case ETH_SS_TEST: memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings)); return; case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) { memcpy(p, hinic_function_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) { memcpy(p, hinic_port_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < nic_dev->num_qps; i++) { for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) { sprintf(p, hinic_tx_queue_stats[j].name, i); p += ETH_GSTRING_LEN; } } for (i = 0; i < nic_dev->num_qps; i++) { for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) { sprintf(p, hinic_rx_queue_stats[j].name, i); p += ETH_GSTRING_LEN; } } return; default: return; } } static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time) { u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf; struct net_device *netdev = nic_dev->netdev; struct sk_buff *skb_tmp = NULL; struct sk_buff *skb = NULL; u32 cnt = test_time * 5; u8 *test_data = NULL; u32 i; u8 j; skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC); if (!skb_tmp) return -ENOMEM; test_data = __skb_put(skb_tmp, LP_PKT_LEN); memset(test_data, 0xFF, 2 * ETH_ALEN); test_data[ETH_ALEN] = 0xFE; test_data[2 * 
ETH_ALEN] = 0x08; test_data[2 * ETH_ALEN + 1] = 0x0; for (i = ETH_HLEN; i < LP_PKT_LEN; i++) test_data[i] = i & 0xFF; skb_tmp->queue_mapping = 0; skb_tmp->ip_summed = CHECKSUM_COMPLETE; skb_tmp->dev = netdev; for (i = 0; i < cnt; i++) { nic_dev->lb_test_rx_idx = 0; memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN); for (j = 0; j < LP_PKT_CNT; j++) { skb = pskb_copy(skb_tmp, GFP_ATOMIC); if (!skb) { dev_kfree_skb_any(skb_tmp); netif_err(nic_dev, drv, netdev, "Copy skb failed for loopback test\n"); return -ENOMEM; } /* mark index for every pkt */ skb->data[LP_PKT_LEN - 1] = j; if (hinic_lb_xmit_frame(skb, netdev)) { dev_kfree_skb_any(skb); dev_kfree_skb_any(skb_tmp); netif_err(nic_dev, drv, netdev, "Xmit pkt failed for loopback test\n"); return -EBUSY; } } /* wait till all pkts received to RX buffer */ msleep(200); for (j = 0; j < LP_PKT_CNT; j++) { if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN, skb_tmp->data, LP_PKT_LEN - 1) || (*(lb_test_rx_buf + j * LP_PKT_LEN + LP_PKT_LEN - 1) != j)) { dev_kfree_skb_any(skb_tmp); netif_err(nic_dev, drv, netdev, "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n", j + i * LP_PKT_CNT, LP_PKT_LEN - 1, *(lb_test_rx_buf + j * LP_PKT_LEN + LP_PKT_LEN - 1)); return -EIO; } } } dev_kfree_skb_any(skb_tmp); return 0; } static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time, enum diag_test_index *test_index) { struct net_device *netdev = nic_dev->netdev; u8 *lb_test_rx_buf = NULL; int err = 0; if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) { *test_index = INTERNAL_LP_TEST; if (hinic_set_loopback_mode(nic_dev->hwdev, HINIC_INTERNAL_LP_MODE, true)) { netif_err(nic_dev, drv, netdev, "Failed to set port loopback mode before loopback test\n"); return -EIO; } } else { *test_index = EXTERNAL_LP_TEST; } lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN); if (!lb_test_rx_buf) { err = -ENOMEM; } else { nic_dev->lb_test_rx_buf = lb_test_rx_buf; nic_dev->lb_pkt_len = LP_PKT_LEN; nic_dev->flags |= HINIC_LP_TEST; err = hinic_run_lp_test(nic_dev, test_time); nic_dev->flags &= ~HINIC_LP_TEST; msleep(100); vfree(lb_test_rx_buf); nic_dev->lb_test_rx_buf = NULL; } if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) { if (hinic_set_loopback_mode(nic_dev->hwdev, HINIC_INTERNAL_LP_MODE, false)) { netif_err(nic_dev, drv, netdev, "Failed to cancel port loopback mode after loopback test\n"); err = -EIO; } } return err; } static void hinic_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct hinic_dev *nic_dev = netdev_priv(netdev); enum hinic_port_link_state link_state; enum diag_test_index test_index = 0; int err = 0; memset(data, 0, DIAG_TEST_MAX * sizeof(u64)); /* don't support loopback test when netdev is closed. 
*/ if (!(nic_dev->flags & HINIC_INTF_UP)) { netif_err(nic_dev, drv, netdev, "Do not support loopback test when netdev is closed\n"); eth_test->flags |= ETH_TEST_FL_FAILED; data[PORT_DOWN_ERR_IDX] = 1; return; } netif_carrier_off(netdev); netif_tx_disable(netdev); err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME, &test_index); if (err) { eth_test->flags |= ETH_TEST_FL_FAILED; data[test_index] = 1; } netif_tx_wake_all_queues(netdev); err = hinic_port_link_state(nic_dev, &link_state); if (!err && link_state == HINIC_LINK_STATE_UP) netif_carrier_on(netdev); } static int hinic_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct hinic_dev *nic_dev = netdev_priv(netdev); int err = 0; u8 port; port = nic_dev->hwdev->port_id; switch (state) { case ETHTOOL_ID_ACTIVE: err = hinic_set_led_status(nic_dev->hwdev, port, HINIC_LED_TYPE_LINK, HINIC_LED_MODE_FORCE_2HZ); if (err) netif_err(nic_dev, drv, netdev, "Set LED blinking in 2HZ failed\n"); break; case ETHTOOL_ID_INACTIVE: err = hinic_reset_led_status(nic_dev->hwdev, port); if (err) netif_err(nic_dev, drv, netdev, "Reset LED to original status failed\n"); break; default: return -EOPNOTSUPP; } return err; } static int hinic_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) { struct hinic_dev *nic_dev = netdev_priv(netdev); u8 sfp_type_ext; u8 sfp_type; int err; err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext); if (err) return err; switch (sfp_type) { case SFF8024_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; case SFF8024_ID_QSFP_8438: modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; break; case SFF8024_ID_QSFP_8436_8636: if (sfp_type_ext >= 0x3) { modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; } else { modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; } break; case SFF8024_ID_QSFP28_8636: modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; break; default: netif_warn(nic_dev, drv, netdev, "Optical module unknown: 0x%x\n", sfp_type); return -EINVAL; } return 0; } static int hinic_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct hinic_dev *nic_dev = netdev_priv(netdev); u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; u16 len; int err; if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) return -EINVAL; memset(data, 0, ee->len); err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len); if (err) return err; memcpy(data, sfp_data + ee->offset, ee->len); return 0; } static int hinic_get_link_ext_state(struct net_device *netdev, struct ethtool_link_ext_state_info *link_ext_state_info) { struct hinic_dev *nic_dev = netdev_priv(netdev); if (netif_carrier_ok(netdev)) return -ENODATA; if (nic_dev->cable_unplugged) link_ext_state_info->link_ext_state = ETHTOOL_LINK_EXT_STATE_NO_CABLE; else if (nic_dev->module_unrecognized) link_ext_state_info->link_ext_state = ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH; return 0; } static const struct ethtool_ops hinic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES | ETHTOOL_COALESCE_TX_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES, .get_link_ksettings = hinic_get_link_ksettings, .set_link_ksettings = hinic_set_link_ksettings, .get_drvinfo = hinic_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ext_state = hinic_get_link_ext_state, 
.get_ringparam = hinic_get_ringparam, .set_ringparam = hinic_set_ringparam, .get_coalesce = hinic_get_coalesce, .set_coalesce = hinic_set_coalesce, .get_per_queue_coalesce = hinic_get_per_queue_coalesce, .set_per_queue_coalesce = hinic_set_per_queue_coalesce, .get_pauseparam = hinic_get_pauseparam, .set_pauseparam = hinic_set_pauseparam, .get_channels = hinic_get_channels, .set_channels = hinic_set_channels, .get_rxnfc = hinic_get_rxnfc, .set_rxnfc = hinic_set_rxnfc, .get_rxfh_key_size = hinic_get_rxfh_key_size, .get_rxfh_indir_size = hinic_get_rxfh_indir_size, .get_rxfh = hinic_get_rxfh, .set_rxfh = hinic_set_rxfh, .get_sset_count = hinic_get_sset_count, .get_ethtool_stats = hinic_get_ethtool_stats, .get_strings = hinic_get_strings, .self_test = hinic_diag_test, .set_phys_id = hinic_set_phys_id, .get_module_info = hinic_get_module_info, .get_module_eeprom = hinic_get_module_eeprom, }; static const struct ethtool_ops hinicvf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES | ETHTOOL_COALESCE_TX_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES, .get_link_ksettings = hinic_get_link_ksettings, .get_drvinfo = hinic_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = hinic_get_ringparam, .set_ringparam = hinic_set_ringparam, .get_coalesce = hinic_get_coalesce, .set_coalesce = hinic_set_coalesce, .get_per_queue_coalesce = hinic_get_per_queue_coalesce, .set_per_queue_coalesce = hinic_set_per_queue_coalesce, .get_channels = hinic_get_channels, .set_channels = hinic_set_channels, .get_rxnfc = hinic_get_rxnfc, .set_rxnfc = hinic_set_rxnfc, .get_rxfh_key_size = hinic_get_rxfh_key_size, .get_rxfh_indir_size = hinic_get_rxfh_indir_size, .get_rxfh = hinic_get_rxfh, .set_rxfh = hinic_set_rxfh, .get_sset_count = hinic_get_sset_count, .get_ethtool_stats = hinic_get_ethtool_stats, .get_strings = hinic_get_strings, }; void hinic_set_ethtool_ops(struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) netdev->ethtool_ops = &hinic_ethtool_ops; else netdev->ethtool_ops = &hinicvf_ethtool_ops; }
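
/*
 * Usage sketch (illustrative only): the ops tables above are exercised from
 * userspace via the standard ethtool utility. Assuming the interface is
 * named eth0, and noting that coalesce values are rounded to the 9 us timer
 * and 8-frame pending units used above, while ring sizes must fall inside
 * the HINIC_MIN_QUEUE_DEPTH..HINIC_MAX_QUEUE_DEPTH range enforced by
 * check_ringparam_valid():
 *
 *   ethtool -C eth0 rx-usecs 36 rx-frames 32    # hinic_set_coalesce()
 *   ethtool -L eth0 combined 8                  # hinic_set_channels()
 *   ethtool -G eth0 rx 1024 tx 1024             # hinic_set_ringparam()
 *   ethtool -X eth0 hfunc toeplitz              # hinic_set_rxfh()
 *
 * Exact option spellings depend on the installed ethtool version; treat
 * these invocations as examples, not as part of the driver.
 */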
linux-master
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
// SPDX-License-Identifier: GPL-2.0-only /* Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/pci.h> #include <linux/if_vlan.h> #include <linux/interrupt.h> #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/module.h> #include "hinic_hw_dev.h" #include "hinic_dev.h" #include "hinic_hw_mbox.h" #include "hinic_hw_cmdq.h" #include "hinic_port.h" #include "hinic_sriov.h" static unsigned char set_vf_link_state; module_param(set_vf_link_state, byte, 0444); MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto, 1 represents link always up, 2 represents link always down. - default is 0."); #define HINIC_VLAN_PRIORITY_SHIFT 13 #define HINIC_ADD_VLAN_IN_MAC 0x8000 #define HINIC_TX_RATE_TABLE_FULL 12 #define HINIC_MAX_QOS 7 static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id) { struct hinic_port_mac_cmd mac_info = {0}; u16 out_size = sizeof(mac_info); int err; mac_info.func_idx = func_id; mac_info.vlan_id = vlan_id; memcpy(mac_info.mac, mac_addr, ETH_ALEN); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info, sizeof(mac_info), &mac_info, &out_size); if (err || out_size != sizeof(mac_info) || (mac_info.status && mac_info.status != HINIC_MGMT_STATUS_EXIST)) { dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x\n", err, mac_info.status, out_size); return -EIO; } return 0; } static void hinic_notify_vf_link_status(struct hinic_hwdev *hwdev, u16 vf_id, u8 link_status) { struct vf_data_storage *vf_infos = hwdev->func_to_io.vf_infos; struct hinic_port_link_status link = {0}; u16 out_size = sizeof(link); int err; if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) { link.link = link_status; link.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id; err = hinic_mbox_to_vf(hwdev, HINIC_MOD_L2NIC, vf_id, HINIC_PORT_CMD_LINK_STATUS_REPORT, &link, sizeof(link), &link, &out_size, 0); if (err || !out_size || link.status) dev_err(&hwdev->hwif->pdev->dev, "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n", HW_VF_ID_TO_OS(vf_id), err, link.status, out_size); } } /* send link change event mbox msg to active vfs under the pf */ void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev, u8 link_status) { struct hinic_func_to_io *nic_io = &hwdev->func_to_io; u16 i; nic_io->link_status = link_status; for (i = 1; i <= nic_io->max_vfs; i++) { if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced) hinic_notify_vf_link_status(hwdev, i, link_status); } } static u16 hinic_vf_info_vlanprio(struct hinic_hwdev *hwdev, int vf_id) { struct hinic_func_to_io *nic_io = &hwdev->func_to_io; u16 pf_vlan, vlanprio; u8 pf_qos; pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan; pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos; vlanprio = pf_vlan | pf_qos << HINIC_VLAN_PRIORITY_SHIFT; return vlanprio; } static int hinic_set_vf_vlan(struct hinic_hwdev *hwdev, bool add, u16 vid, u8 qos, int vf_id) { struct hinic_vf_vlan_config vf_vlan = {0}; u16 out_size = sizeof(vf_vlan); int err; u8 cmd; /* VLAN 0 is a special case, don't allow it to be removed */ if (!vid && !add) return 0; vf_vlan.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id; vf_vlan.vlan_id = vid; vf_vlan.qos = qos; if (add) cmd = HINIC_PORT_CMD_SET_VF_VLAN; else cmd = HINIC_PORT_CMD_CLR_VF_VLAN; err = hinic_port_msg_cmd(hwdev, cmd, &vf_vlan, sizeof(vf_vlan), &vf_vlan, &out_size); if (err || !out_size || vf_vlan.status) { 
dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d vlan, err: %d, status: 0x%x, out size: 0x%x\n", HW_VF_ID_TO_OS(vf_id), err, vf_vlan.status, out_size); return -EFAULT; } return 0; } static int hinic_set_vf_tx_rate_max_min(struct hinic_hwdev *hwdev, u16 vf_id, u32 max_rate, u32 min_rate) { struct hinic_func_to_io *nic_io = &hwdev->func_to_io; struct hinic_tx_rate_cfg_max_min rate_cfg = {0}; u16 out_size = sizeof(rate_cfg); int err; rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id; rate_cfg.max_rate = max_rate; rate_cfg.min_rate = min_rate; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE, &rate_cfg, sizeof(rate_cfg), &rate_cfg, &out_size); if ((rate_cfg.status != HINIC_MGMT_CMD_UNSUPPORTED && rate_cfg.status) || err || !out_size) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) max rate(%d), min rate(%d), err: %d, status: 0x%x, out size: 0x%x\n", HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err, rate_cfg.status, out_size); return -EIO; } if (!rate_cfg.status) { nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate; } return rate_cfg.status; } static int hinic_set_vf_rate_limit(struct hinic_hwdev *hwdev, u16 vf_id, u32 tx_rate) { struct hinic_func_to_io *nic_io = &hwdev->func_to_io; struct hinic_tx_rate_cfg rate_cfg = {0}; u16 out_size = sizeof(rate_cfg); int err; rate_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id; rate_cfg.tx_rate = tx_rate; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VF_RATE, &rate_cfg, sizeof(rate_cfg), &rate_cfg, &out_size); if (err || !out_size || rate_cfg.status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) rate(%d), err: %d, status: 0x%x, out size: 0x%x\n", HW_VF_ID_TO_OS(vf_id), tx_rate, err, rate_cfg.status, out_size); if (rate_cfg.status) return rate_cfg.status; return -EIO; } nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = tx_rate; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = 0; return 0; } static int hinic_set_vf_tx_rate(struct hinic_hwdev *hwdev, u16 vf_id, u32 max_rate, u32 min_rate) { int err; err = hinic_set_vf_tx_rate_max_min(hwdev, vf_id, max_rate, min_rate); if (err != HINIC_MGMT_CMD_UNSUPPORTED) return err; if (min_rate) { dev_err(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate\n"); return -EOPNOTSUPP; } dev_info(&hwdev->hwif->pdev->dev, "Current firmware doesn't support to set min tx rate, force min_tx_rate = max_tx_rate\n"); return hinic_set_vf_rate_limit(hwdev, vf_id, max_rate); } static int hinic_init_vf_config(struct hinic_hwdev *hwdev, u16 vf_id) { struct vf_data_storage *vf_info; u16 func_id, vlan_id; int err = 0; vf_info = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id); if (vf_info->pf_set_mac) { func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id; vlan_id = 0; err = hinic_set_mac(hwdev, vf_info->vf_mac_addr, vlan_id, func_id); if (err) { dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set VF %d MAC\n", HW_VF_ID_TO_OS(vf_id)); return err; } } if (hinic_vf_info_vlanprio(hwdev, vf_id)) { err = hinic_set_vf_vlan(hwdev, true, vf_info->pf_vlan, vf_info->pf_qos, vf_id); if (err) { dev_err(&hwdev->hwif->pdev->dev, "Failed to add VF %d VLAN_QOS\n", HW_VF_ID_TO_OS(vf_id)); return err; } } if (vf_info->max_rate) { err = hinic_set_vf_tx_rate(hwdev, vf_id, vf_info->max_rate, vf_info->min_rate); if (err) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF %d max rate: %d, min rate: %d\n", HW_VF_ID_TO_OS(vf_id), vf_info->max_rate, vf_info->min_rate); return err; } } 
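	/* MAC, VLAN/QoS and rate limits stored by the PF have now been
	 * re-applied to this VF.
	 */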
return 0; } static int hinic_register_vf_msg_handler(void *hwdev, u16 vf_id, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_register_vf *register_info = buf_out; struct hinic_hwdev *hw_dev = hwdev; struct hinic_func_to_io *nic_io; int err; nic_io = &hw_dev->func_to_io; if (vf_id > nic_io->max_vfs) { dev_err(&hw_dev->hwif->pdev->dev, "Register VF id %d exceed limit[0-%d]\n", HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs)); register_info->status = EFAULT; return -EFAULT; } *out_size = sizeof(*register_info); err = hinic_init_vf_config(hw_dev, vf_id); if (err) { register_info->status = EFAULT; return err; } nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true; return 0; } static int hinic_unregister_vf_msg_handler(void *hwdev, u16 vf_id, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_hwdev *hw_dev = hwdev; struct hinic_func_to_io *nic_io; nic_io = &hw_dev->func_to_io; *out_size = 0; if (vf_id > nic_io->max_vfs) return 0; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = false; return 0; } static int hinic_change_vf_mtu_msg_handler(void *hwdev, u16 vf_id, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_hwdev *hw_dev = hwdev; int err; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, buf_in, in_size, buf_out, out_size); if (err) { dev_err(&hw_dev->hwif->pdev->dev, "Failed to set VF %u mtu\n", vf_id); return err; } return 0; } static int hinic_get_vf_mac_msg_handler(void *hwdev, u16 vf_id, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_port_mac_cmd *mac_info = buf_out; struct hinic_hwdev *dev = hwdev; struct hinic_func_to_io *nic_io; struct vf_data_storage *vf_info; nic_io = &dev->func_to_io; vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); memcpy(mac_info->mac, vf_info->vf_mac_addr, ETH_ALEN); mac_info->status = 0; *out_size = sizeof(*mac_info); return 0; } static int hinic_set_vf_mac_msg_handler(void *hwdev, u16 vf_id, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_port_mac_cmd *mac_out = buf_out; struct hinic_port_mac_cmd *mac_in = buf_in; struct hinic_hwdev *hw_dev = hwdev; struct hinic_func_to_io *nic_io; struct vf_data_storage *vf_info; int err; nic_io = &hw_dev->func_to_io; vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); if (vf_info->pf_set_mac && !(vf_info->trust) && is_valid_ether_addr(mac_in->mac)) { dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF %d MAC address\n", HW_VF_ID_TO_OS(vf_id)); mac_out->status = HINIC_PF_SET_VF_ALREADY; *out_size = sizeof(*mac_out); return 0; } err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_SET_MAC, buf_in, in_size, buf_out, out_size); if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) { dev_err(&hw_dev->hwif->pdev->dev, "Failed to set VF %d MAC address, err: %d, status: 0x%x, out size: 0x%x\n", HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size); return -EFAULT; } return err; } static int hinic_del_vf_mac_msg_handler(void *hwdev, u16 vf_id, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_port_mac_cmd *mac_out = buf_out; struct hinic_port_mac_cmd *mac_in = buf_in; struct hinic_hwdev *hw_dev = hwdev; struct hinic_func_to_io *nic_io; struct vf_data_storage *vf_info; int err; nic_io = &hw_dev->func_to_io; vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) && !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) { dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF mac.\n"); 
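		/* The PF owns this MAC: skip the delete and report
		 * HINIC_PF_SET_VF_ALREADY back to the VF.
		 */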
mac_out->status = HINIC_PF_SET_VF_ALREADY; *out_size = sizeof(*mac_out); return 0; } err = hinic_port_msg_cmd(hw_dev, HINIC_PORT_CMD_DEL_MAC, buf_in, in_size, buf_out, out_size); if ((err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) || !(*out_size)) { dev_err(&hw_dev->hwif->pdev->dev, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", HW_VF_ID_TO_OS(vf_id), err, mac_out->status, *out_size); return -EFAULT; } return err; } static int hinic_get_vf_link_status_msg_handler(void *hwdev, u16 vf_id, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_port_link_cmd *get_link = buf_out; struct hinic_hwdev *hw_dev = hwdev; struct vf_data_storage *vf_infos; struct hinic_func_to_io *nic_io; bool link_forced, link_up; nic_io = &hw_dev->func_to_io; vf_infos = nic_io->vf_infos; link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced; link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up; if (link_forced) get_link->state = link_up ? HINIC_LINK_STATE_UP : HINIC_LINK_STATE_DOWN; else get_link->state = nic_io->link_status; get_link->status = 0; *out_size = sizeof(*get_link); return 0; } static bool check_func_table(struct hinic_hwdev *hwdev, u16 func_idx, void *buf_in, u16 in_size) { struct hinic_cmd_fw_ctxt *function_table = buf_in; if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size) || !function_table->rx_buf_sz) return false; return true; } static struct vf_cmd_msg_handle nic_vf_cmd_msg_handler[] = { {HINIC_PORT_CMD_VF_REGISTER, hinic_register_vf_msg_handler}, {HINIC_PORT_CMD_VF_UNREGISTER, hinic_unregister_vf_msg_handler}, {HINIC_PORT_CMD_CHANGE_MTU, hinic_change_vf_mtu_msg_handler}, {HINIC_PORT_CMD_GET_MAC, hinic_get_vf_mac_msg_handler}, {HINIC_PORT_CMD_SET_MAC, hinic_set_vf_mac_msg_handler}, {HINIC_PORT_CMD_DEL_MAC, hinic_del_vf_mac_msg_handler}, {HINIC_PORT_CMD_GET_LINK_STATE, hinic_get_vf_link_status_msg_handler}, }; static struct vf_cmd_check_handle nic_cmd_support_vf[] = { {HINIC_PORT_CMD_VF_REGISTER, NULL}, {HINIC_PORT_CMD_VF_UNREGISTER, NULL}, {HINIC_PORT_CMD_CHANGE_MTU, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_ADD_VLAN, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_DEL_VLAN, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_MAC, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_MAC, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_DEL_MAC, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_RX_MODE, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_PAUSE_INFO, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_LINK_STATE, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_LRO, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_RX_CSUM, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_VPORT_STAT, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_CLEAN_VPORT_STAT, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_RSS_TEMP_MGR, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_RSS_CFG, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_FWCTXT_INIT, check_func_table}, {HINIC_PORT_CMD_GET_MGMT_VERSION, NULL}, {HINIC_PORT_CMD_SET_FUNC_STATE, 
hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_GLOBAL_QPN, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_TSO, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_RQ_IQ_MAP, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_LINK_STATUS_REPORT, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_CAP, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_GET_VF_COS, NULL}, {HINIC_PORT_CMD_SET_VHD_CFG, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_VLAN_FILTER, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_Q_FILTER, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_TCAM_FILTER, NULL}, {HINIC_PORT_CMD_UP_TC_ADD_FLOW, NULL}, {HINIC_PORT_CMD_UP_TC_DEL_FLOW, NULL}, {HINIC_PORT_CMD_UP_TC_FLUSH_TCAM, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_UP_TC_ENABLE, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_CABLE_PLUG_EVENT, NULL}, {HINIC_PORT_CMD_LINK_ERR_EVENT, NULL}, {HINIC_PORT_CMD_SET_PORT_STATE, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_ETS, NULL}, {HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE, NULL}, {HINIC_PORT_CMD_RESET_LINK_CFG, hinic_mbox_check_func_id_8B}, {HINIC_PORT_CMD_SET_LINK_FOLLOW, NULL}, {HINIC_PORT_CMD_CLEAR_QP_RES, NULL}, }; #define CHECK_IPSU_15BIT 0X8000 static struct hinic_sriov_info *hinic_get_sriov_info_by_pcidev(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct hinic_dev *nic_dev = netdev_priv(netdev); return &nic_dev->sriov_info; } static int hinic_check_mac_info(u8 status, u16 vlan_id) { if ((status && status != HINIC_MGMT_STATUS_EXIST) || (vlan_id & CHECK_IPSU_15BIT && status == HINIC_MGMT_STATUS_EXIST)) return -EINVAL; return 0; } #define HINIC_VLAN_ID_MASK 0x7FFF static int hinic_update_mac(struct hinic_hwdev *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id) { struct hinic_port_mac_update mac_info = {0}; u16 out_size = sizeof(mac_info); int err; if (!hwdev || !old_mac || !new_mac) return -EINVAL; if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { dev_err(&hwdev->hwif->pdev->dev, "Invalid VLAN number: %d\n", (vlan_id & HINIC_VLAN_ID_MASK)); return -EINVAL; } mac_info.func_id = func_id; mac_info.vlan_id = vlan_id; memcpy(mac_info.old_mac, old_mac, ETH_ALEN); memcpy(mac_info.new_mac, new_mac, ETH_ALEN); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_UPDATE_MAC, &mac_info, sizeof(mac_info), &mac_info, &out_size); if (err || !out_size || hinic_check_mac_info(mac_info.status, mac_info.vlan_id)) { dev_err(&hwdev->hwif->pdev->dev, "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", err, mac_info.status, out_size); return -EINVAL; } if (mac_info.status == HINIC_MGMT_STATUS_EXIST) dev_warn(&hwdev->hwif->pdev->dev, "MAC is repeated. 
Ignore update operation\n"); return 0; } static void hinic_get_vf_config(struct hinic_hwdev *hwdev, u16 vf_id, struct ifla_vf_info *ivi) { struct vf_data_storage *vfinfo; vfinfo = hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id); ivi->vf = HW_VF_ID_TO_OS(vf_id); memcpy(ivi->mac, vfinfo->vf_mac_addr, ETH_ALEN); ivi->vlan = vfinfo->pf_vlan; ivi->qos = vfinfo->pf_qos; ivi->spoofchk = vfinfo->spoofchk; ivi->trusted = vfinfo->trust; ivi->max_tx_rate = vfinfo->max_rate; ivi->min_tx_rate = vfinfo->min_rate; if (!vfinfo->link_forced) ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; else if (vfinfo->link_up) ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; else ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; } int hinic_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_sriov_info *sriov_info; sriov_info = &nic_dev->sriov_info; if (vf >= sriov_info->num_vfs) return -EINVAL; hinic_get_vf_config(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), ivi); return 0; } static int hinic_set_vf_mac(struct hinic_hwdev *hwdev, int vf, unsigned char *mac_addr) { struct hinic_func_to_io *nic_io = &hwdev->func_to_io; struct vf_data_storage *vf_info; u16 func_id; int err; vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf); /* duplicate request, so just return success */ if (vf_info->pf_set_mac && !memcmp(vf_info->vf_mac_addr, mac_addr, ETH_ALEN)) return 0; vf_info->pf_set_mac = true; func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf; err = hinic_update_mac(hwdev, vf_info->vf_mac_addr, mac_addr, 0, func_id); if (err) { vf_info->pf_set_mac = false; return err; } memcpy(vf_info->vf_mac_addr, mac_addr, ETH_ALEN); return 0; } int hinic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_sriov_info *sriov_info; int err; sriov_info = &nic_dev->sriov_info; if (!is_valid_ether_addr(mac) || vf >= sriov_info->num_vfs) return -EINVAL; err = hinic_set_vf_mac(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), mac); if (err) return err; netif_info(nic_dev, drv, netdev, "Setting MAC %pM on VF %d\n", mac, vf); netif_info(nic_dev, drv, netdev, "Reload the VF driver to make this change effective."); return 0; } static int hinic_add_vf_vlan(struct hinic_hwdev *hwdev, int vf_id, u16 vlan, u8 qos) { struct hinic_func_to_io *nic_io = &hwdev->func_to_io; int err; err = hinic_set_vf_vlan(hwdev, true, vlan, qos, vf_id); if (err) return err; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos; dev_info(&hwdev->hwif->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, HW_VF_ID_TO_OS(vf_id)); return 0; } static int hinic_kill_vf_vlan(struct hinic_hwdev *hwdev, int vf_id) { struct hinic_func_to_io *nic_io = &hwdev->func_to_io; int err; err = hinic_set_vf_vlan(hwdev, false, nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos, vf_id); if (err) return err; dev_info(&hwdev->hwif->pdev->dev, "Remove VLAN %d on VF %d\n", nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, HW_VF_ID_TO_OS(vf_id)); nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0; nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0; return 0; } static int hinic_update_mac_vlan(struct hinic_dev *nic_dev, u16 old_vlan, u16 new_vlan, int vf_id) { struct vf_data_storage *vf_info; u16 vlan_id; int err; if (!nic_dev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) return -EINVAL; vf_info = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id); if 
(!vf_info->pf_set_mac) return 0; vlan_id = old_vlan; if (vlan_id) vlan_id |= HINIC_ADD_VLAN_IN_MAC; err = hinic_port_del_mac(nic_dev, vf_info->vf_mac_addr, vlan_id); if (err) { dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to delete VF %d MAC %pM vlan %d\n", HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, old_vlan); return err; } vlan_id = new_vlan; if (vlan_id) vlan_id |= HINIC_ADD_VLAN_IN_MAC; err = hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id); if (err) { dev_err(&nic_dev->hwdev->hwif->pdev->dev, "Failed to add VF %d MAC %pM vlan %d\n", HW_VF_ID_TO_OS(vf_id), vf_info->vf_mac_addr, new_vlan); goto out; } return 0; out: vlan_id = old_vlan; if (vlan_id) vlan_id |= HINIC_ADD_VLAN_IN_MAC; hinic_port_add_mac(nic_dev, vf_info->vf_mac_addr, vlan_id); return err; } static int set_hw_vf_vlan(struct hinic_dev *nic_dev, u16 cur_vlanprio, int vf, u16 vlan, u8 qos) { u16 old_vlan = cur_vlanprio & VLAN_VID_MASK; int err = 0; if (vlan || qos) { if (cur_vlanprio) { err = hinic_kill_vf_vlan(nic_dev->hwdev, OS_VF_ID_TO_HW(vf)); if (err) { dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d old vlan %d\n", vf, old_vlan); goto out; } } err = hinic_add_vf_vlan(nic_dev->hwdev, OS_VF_ID_TO_HW(vf), vlan, qos); if (err) { dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to add vf %d new vlan %d\n", vf, vlan); goto out; } } else { err = hinic_kill_vf_vlan(nic_dev->hwdev, OS_VF_ID_TO_HW(vf)); if (err) { dev_err(&nic_dev->sriov_info.pdev->dev, "Failed to delete vf %d vlan %d\n", vf, old_vlan); goto out; } } err = hinic_update_mac_vlan(nic_dev, old_vlan, vlan, OS_VF_ID_TO_HW(vf)); out: return err; } int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_sriov_info *sriov_info; u16 vlanprio, cur_vlanprio; sriov_info = &nic_dev->sriov_info; if (vf >= sriov_info->num_vfs || vlan >= VLAN_N_VID || qos > HINIC_MAX_QOS) return -EINVAL; if (vlan_proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; vlanprio = vlan | qos << HINIC_VLAN_PRIORITY_SHIFT; cur_vlanprio = hinic_vf_info_vlanprio(nic_dev->hwdev, OS_VF_ID_TO_HW(vf)); /* duplicate request, so just return success */ if (vlanprio == cur_vlanprio) return 0; return set_hw_vf_vlan(nic_dev, cur_vlanprio, vf, vlan, qos); } static int hinic_set_vf_trust(struct hinic_hwdev *hwdev, u16 vf_id, bool trust) { struct vf_data_storage *vf_infos; struct hinic_func_to_io *nic_io; if (!hwdev) return -EINVAL; nic_io = &hwdev->func_to_io; vf_infos = nic_io->vf_infos; vf_infos[vf_id].trust = trust; return 0; } int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) { struct hinic_dev *adapter = netdev_priv(netdev); struct hinic_sriov_info *sriov_info; struct hinic_func_to_io *nic_io; bool cur_trust; int err; sriov_info = &adapter->sriov_info; nic_io = &adapter->hwdev->func_to_io; if (vf >= sriov_info->num_vfs) return -EINVAL; cur_trust = nic_io->vf_infos[vf].trust; /* same request, so just return success */ if (setting == cur_trust) return 0; err = hinic_set_vf_trust(adapter->hwdev, vf, setting); if (!err) dev_info(&sriov_info->pdev->dev, "Set VF %d trusted %s succeed\n", vf, setting ? "on" : "off"); else dev_err(&sriov_info->pdev->dev, "Failed set VF %d trusted %s\n", vf, setting ? 
"on" : "off"); return err; } int hinic_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate) { static const u32 speeds[] = { SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, SPEED_25000, SPEED_40000, SPEED_100000 }; struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_port_cap port_cap = { 0 }; enum hinic_port_link_state link_state; int err; if (vf >= nic_dev->sriov_info.num_vfs) { netif_err(nic_dev, drv, netdev, "VF number must be less than %d\n", nic_dev->sriov_info.num_vfs); return -EINVAL; } err = hinic_port_link_state(nic_dev, &link_state); if (err) { netif_err(nic_dev, drv, netdev, "Get link status failed when setting vf tx rate\n"); return -EIO; } if (link_state == HINIC_LINK_STATE_DOWN) { netif_err(nic_dev, drv, netdev, "Link status must be up when setting vf tx rate\n"); return -EPERM; } err = hinic_port_get_cap(nic_dev, &port_cap); if (err || port_cap.speed > LINK_SPEED_100GB) return -EIO; /* rate limit cannot be less than 0 and greater than link speed */ if (max_tx_rate < 0 || max_tx_rate > speeds[port_cap.speed]) { netif_err(nic_dev, drv, netdev, "Max tx rate must be in [0 - %d]\n", speeds[port_cap.speed]); return -EINVAL; } err = hinic_set_vf_tx_rate(nic_dev->hwdev, OS_VF_ID_TO_HW(vf), max_tx_rate, min_tx_rate); if (err) { netif_err(nic_dev, drv, netdev, "Unable to set VF %d max rate %d min rate %d%s\n", vf, max_tx_rate, min_tx_rate, err == HINIC_TX_RATE_TABLE_FULL ? ", tx rate profile is full" : ""); return -EIO; } netif_info(nic_dev, drv, netdev, "Set VF %d max tx rate %d min tx rate %d successfully\n", vf, max_tx_rate, min_tx_rate); return 0; } static int hinic_set_vf_spoofchk(struct hinic_hwdev *hwdev, u16 vf_id, bool spoofchk) { struct hinic_spoofchk_set spoofchk_cfg = {0}; struct vf_data_storage *vf_infos = NULL; u16 out_size = sizeof(spoofchk_cfg); int err; if (!hwdev) return -EINVAL; vf_infos = hwdev->func_to_io.vf_infos; spoofchk_cfg.func_id = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id; spoofchk_cfg.state = spoofchk ? 1 : 0; err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ENABLE_SPOOFCHK, &spoofchk_cfg, sizeof(spoofchk_cfg), &spoofchk_cfg, &out_size); if (spoofchk_cfg.status == HINIC_MGMT_CMD_UNSUPPORTED) { err = HINIC_MGMT_CMD_UNSUPPORTED; } else if (err || !out_size || spoofchk_cfg.status) { dev_err(&hwdev->hwif->pdev->dev, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n", HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.status, out_size); err = -EIO; } vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk; return err; } int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_sriov_info *sriov_info; bool cur_spoofchk; int err; sriov_info = &nic_dev->sriov_info; if (vf >= sriov_info->num_vfs) return -EINVAL; cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk; /* same request, so just return success */ if (setting == cur_spoofchk) return 0; err = hinic_set_vf_spoofchk(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), setting); if (!err) { netif_info(nic_dev, drv, netdev, "Set VF %d spoofchk %s successfully\n", vf, setting ? 
"on" : "off"); } else if (err == HINIC_MGMT_CMD_UNSUPPORTED) { netif_err(nic_dev, drv, netdev, "Current firmware doesn't support to set vf spoofchk, need to upgrade latest firmware version\n"); err = -EOPNOTSUPP; } return err; } static int hinic_set_vf_link_state(struct hinic_hwdev *hwdev, u16 vf_id, int link) { struct hinic_func_to_io *nic_io = &hwdev->func_to_io; struct vf_data_storage *vf_infos = nic_io->vf_infos; u8 link_status = 0; switch (link) { case HINIC_IFLA_VF_LINK_STATE_AUTO: vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false; vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_io->link_status ? true : false; link_status = nic_io->link_status; break; case HINIC_IFLA_VF_LINK_STATE_ENABLE: vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true; link_status = HINIC_LINK_UP; break; case HINIC_IFLA_VF_LINK_STATE_DISABLE: vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false; link_status = HINIC_LINK_DOWN; break; default: return -EINVAL; } /* Notify the VF of its new link state */ hinic_notify_vf_link_status(hwdev, vf_id, link_status); return 0; } int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) { struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_sriov_info *sriov_info; sriov_info = &nic_dev->sriov_info; if (vf_id >= sriov_info->num_vfs) { netif_err(nic_dev, drv, netdev, "Invalid VF Identifier %d\n", vf_id); return -EINVAL; } return hinic_set_vf_link_state(sriov_info->hwdev, OS_VF_ID_TO_HW(vf_id), link); } /* pf receive message from vf */ static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { u8 size = ARRAY_SIZE(nic_cmd_support_vf); struct vf_cmd_msg_handle *vf_msg_handle; struct hinic_hwdev *dev = hwdev; struct hinic_func_to_io *nic_io; struct hinic_pfhwdev *pfhwdev; int err = 0; u32 i; if (!hwdev) return -EINVAL; if (!hinic_mbox_check_cmd_valid(hwdev, nic_cmd_support_vf, vf_id, cmd, buf_in, in_size, size)) { dev_err(&dev->hwif->pdev->dev, "PF Receive VF nic cmd: 0x%x, mbox len: 0x%x is invalid\n", cmd, in_size); return HINIC_MBOX_VF_CMD_ERROR; } pfhwdev = container_of(dev, struct hinic_pfhwdev, hwdev); nic_io = &dev->func_to_io; for (i = 0; i < ARRAY_SIZE(nic_vf_cmd_msg_handler); i++) { vf_msg_handle = &nic_vf_cmd_msg_handler[i]; if (cmd == vf_msg_handle->cmd && vf_msg_handle->cmd_msg_handler) { err = vf_msg_handle->cmd_msg_handler(hwdev, vf_id, buf_in, in_size, buf_out, out_size); break; } } if (i == ARRAY_SIZE(nic_vf_cmd_msg_handler)) err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd, buf_in, in_size, buf_out, out_size, HINIC_MGMT_MSG_SYNC); if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW) dev_err(&nic_io->hwif->pdev->dev, "PF receive VF L2NIC cmd: %d process error, err:%d\n", cmd, err); return err; } static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { struct hinic_dev_cap *dev_cap = buf_out; struct hinic_hwdev *dev = hwdev; struct hinic_cap *cap; cap = &dev->nic_cap; memset(dev_cap, 0, sizeof(*dev_cap)); dev_cap->max_vf = cap->max_vf; dev_cap->max_sqs = cap->max_vf_qps; dev_cap->max_rqs = cap->max_vf_qps; dev_cap->port_id = dev->port_id; *out_size = sizeof(*dev_cap); return 0; } static int hinic_init_vf_infos(struct hinic_func_to_io *nic_io, u16 vf_id) { struct vf_data_storage *vf_infos = nic_io->vf_infos; if (set_vf_link_state > HINIC_IFLA_VF_LINK_STATE_DISABLE) { dev_warn(&nic_io->hwif->pdev->dev, 
"Module Parameter set_vf_link_state value %d is out of range, resetting to %d\n", set_vf_link_state, HINIC_IFLA_VF_LINK_STATE_AUTO); set_vf_link_state = HINIC_IFLA_VF_LINK_STATE_AUTO; } switch (set_vf_link_state) { case HINIC_IFLA_VF_LINK_STATE_AUTO: vf_infos[vf_id].link_forced = false; break; case HINIC_IFLA_VF_LINK_STATE_ENABLE: vf_infos[vf_id].link_forced = true; vf_infos[vf_id].link_up = true; break; case HINIC_IFLA_VF_LINK_STATE_DISABLE: vf_infos[vf_id].link_forced = true; vf_infos[vf_id].link_up = false; break; default: dev_err(&nic_io->hwif->pdev->dev, "Invalid input parameter set_vf_link_state: %d\n", set_vf_link_state); return -EINVAL; } return 0; } static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id) { struct vf_data_storage *vf_infos; vf_infos = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id); if (vf_infos->pf_set_mac) hinic_port_del_mac(nic_dev, vf_infos->vf_mac_addr, 0); if (hinic_vf_info_vlanprio(nic_dev->hwdev, vf_id)) hinic_kill_vf_vlan(nic_dev->hwdev, vf_id); if (vf_infos->max_rate) hinic_set_vf_tx_rate(nic_dev->hwdev, vf_id, 0, 0); if (vf_infos->spoofchk) hinic_set_vf_spoofchk(nic_dev->hwdev, vf_id, false); if (vf_infos->trust) hinic_set_vf_trust(nic_dev->hwdev, vf_id, false); memset(vf_infos, 0, sizeof(*vf_infos)); /* set vf_infos to default */ hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id)); } static void hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info, u16 start_vf_id, u16 end_vf_id) { struct hinic_dev *nic_dev; u16 func_idx, idx; nic_dev = container_of(sriov_info, struct hinic_dev, sriov_info); for (idx = start_vf_id; idx <= end_vf_id; idx++) { func_idx = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + idx; hinic_set_wq_page_size(nic_dev->hwdev, func_idx, HINIC_HW_WQ_PAGE_SIZE); hinic_clear_vf_infos(nic_dev, idx); } } int hinic_vf_func_init(struct hinic_hwdev *hwdev) { struct hinic_register_vf register_info = {0}; u16 out_size = sizeof(register_info); struct hinic_func_to_io *nic_io; int err = 0; u32 size, i; err = hinic_vf_mbox_random_id_init(hwdev); if (err) { dev_err(&hwdev->hwif->pdev->dev, "Failed to init vf mbox random id, err: %d\n", err); return err; } nic_io = &hwdev->func_to_io; if (HINIC_IS_VF(hwdev->hwif)) { err = hinic_mbox_to_pf(hwdev, HINIC_MOD_L2NIC, HINIC_PORT_CMD_VF_REGISTER, &register_info, sizeof(register_info), &register_info, &out_size, 0); if (err || register_info.status || !out_size) { dev_err(&hwdev->hwif->pdev->dev, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", err, register_info.status, out_size); return -EIO; } } else { err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_CFGM, cfg_mbx_pf_proc_vf_msg); if (err) { dev_err(&hwdev->hwif->pdev->dev, "Register PF mailbox callback failed\n"); return err; } nic_io->max_vfs = hwdev->nic_cap.max_vf; size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs; if (size != 0) { nic_io->vf_infos = kzalloc(size, GFP_KERNEL); if (!nic_io->vf_infos) { err = -ENOMEM; goto out_free_nic_io; } for (i = 0; i < nic_io->max_vfs; i++) { err = hinic_init_vf_infos(nic_io, i); if (err) goto err_init_vf_infos; } err = hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC, nic_pf_mbox_handler); if (err) goto err_register_pf_mbox_cb; } } return 0; err_register_pf_mbox_cb: err_init_vf_infos: kfree(nic_io->vf_infos); out_free_nic_io: return err; } void hinic_vf_func_free(struct hinic_hwdev *hwdev) { struct hinic_register_vf unregister = {0}; u16 out_size = sizeof(unregister); int err; if (HINIC_IS_VF(hwdev->hwif)) { err = hinic_mbox_to_pf(hwdev, 
HINIC_MOD_L2NIC, HINIC_PORT_CMD_VF_UNREGISTER, &unregister, sizeof(unregister), &unregister, &out_size, 0); if (err || !out_size || unregister.status) dev_err(&hwdev->hwif->pdev->dev, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n", err, unregister.status, out_size); } else { if (hwdev->func_to_io.vf_infos) { hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_L2NIC); kfree(hwdev->func_to_io.vf_infos); } } } static int hinic_init_vf_hw(struct hinic_hwdev *hwdev, u16 start_vf_id, u16 end_vf_id) { u16 i, func_idx; int err; /* vf use 256K as default wq page size, and can't change it */ for (i = start_vf_id; i <= end_vf_id; i++) { func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + i; err = hinic_set_wq_page_size(hwdev, func_idx, HINIC_DEFAULT_WQ_PAGE_SIZE); if (err) return err; } return 0; } int hinic_pci_sriov_disable(struct pci_dev *pdev) { struct hinic_sriov_info *sriov_info; u16 tmp_vfs; sriov_info = hinic_get_sriov_info_by_pcidev(pdev); /* if SR-IOV is already disabled then nothing will be done */ if (!sriov_info->sriov_enabled) return 0; set_bit(HINIC_SRIOV_DISABLE, &sriov_info->state); /* If our VFs are assigned we cannot shut down SR-IOV * without causing issues, so just leave the hardware * available but disabled */ if (pci_vfs_assigned(sriov_info->pdev)) { clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state); dev_warn(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); return -EPERM; } sriov_info->sriov_enabled = false; /* disable iov and allow time for transactions to clear */ pci_disable_sriov(sriov_info->pdev); tmp_vfs = (u16)sriov_info->num_vfs; sriov_info->num_vfs = 0; hinic_deinit_vf_hw(sriov_info, OS_VF_ID_TO_HW(0), OS_VF_ID_TO_HW(tmp_vfs - 1)); clear_bit(HINIC_SRIOV_DISABLE, &sriov_info->state); return 0; } static int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) { struct hinic_sriov_info *sriov_info; int err; sriov_info = hinic_get_sriov_info_by_pcidev(pdev); if (test_and_set_bit(HINIC_SRIOV_ENABLE, &sriov_info->state)) { dev_err(&pdev->dev, "SR-IOV enable in process, please wait, num_vfs %d\n", num_vfs); return -EPERM; } err = hinic_init_vf_hw(sriov_info->hwdev, OS_VF_ID_TO_HW(0), OS_VF_ID_TO_HW((u16)num_vfs - 1)); if (err) { dev_err(&sriov_info->pdev->dev, "Failed to init vf in hardware before enable sriov, error %d\n", err); clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state); return err; } err = pci_enable_sriov(sriov_info->pdev, num_vfs); if (err) { dev_err(&pdev->dev, "Failed to enable SR-IOV, error %d\n", err); clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state); return err; } sriov_info->sriov_enabled = true; sriov_info->num_vfs = num_vfs; clear_bit(HINIC_SRIOV_ENABLE, &sriov_info->state); return num_vfs; } int hinic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) { struct hinic_sriov_info *sriov_info; sriov_info = hinic_get_sriov_info_by_pcidev(dev); if (test_bit(HINIC_FUNC_REMOVE, &sriov_info->state)) return -EBUSY; if (!num_vfs) return hinic_pci_sriov_disable(dev); else return hinic_pci_sriov_enable(dev, num_vfs); }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_sriov.c
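The hinic_sriov.c dump above implements the PF-side ndo_set_vf_* callbacks; hinic_ndo_set_vf_vlan() in particular folds the VLAN id and the QoS priority into one 16-bit "vlanprio" word so that a repeated request can be rejected with a single compare. The standalone sketch below illustrates that packing; it is not driver code, and the shift value (13) and QoS limit (7) are assumptions based on the standard 802.1Q PCP layout rather than values read from the HiNIC headers.

/*
 * Standalone illustration only -- not part of hinic_sriov.c above.
 * Shows the vlan|qos packing that hinic_ndo_set_vf_vlan() uses to detect a
 * duplicate request with a single 16-bit compare.  The shift (13) and the
 * QoS limit (7) are assumptions based on the 802.1Q PCP layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIORITY_SHIFT 13	/* assumed: the priority lives in the top 3 bits */
#define VLAN_VID_MASK       0x0fff
#define MAX_QOS             7	/* assumed upper bound for the priority */

static bool vlanprio_valid(uint16_t vlan, uint8_t qos)
{
	/* mirrors the ndo-level sanity check: vid < 4096 and qos <= MAX_QOS */
	return vlan < 4096 && qos <= MAX_QOS;
}

static uint16_t pack_vlanprio(uint16_t vlan, uint8_t qos)
{
	return (uint16_t)(vlan | ((uint16_t)qos << VLAN_PRIORITY_SHIFT));
}

int main(void)
{
	uint16_t cur, req;

	if (!vlanprio_valid(100, 3))
		return 1;

	cur = pack_vlanprio(100, 3);	/* state already programmed for the VF */
	req = pack_vlanprio(100, 3);	/* incoming request from the PF admin */

	if (req == cur) {
		/* same shortcut as the driver: duplicate request, nothing to do */
		printf("duplicate request, skip reprogramming\n");
	} else {
		printf("old vid=%u qos=%u -> new vid=%u qos=%u\n",
		       (unsigned)(cur & VLAN_VID_MASK),
		       (unsigned)(cur >> VLAN_PRIORITY_SHIFT),
		       (unsigned)(req & VLAN_VID_MASK),
		       (unsigned)(req >> VLAN_PRIORITY_SHIFT));
	}
	return 0;
}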
// SPDX-License-Identifier: GPL-2.0-only /* * Huawei HiNIC PCI Express Linux driver * Copyright(c) 2017 Huawei Technologies Co., Ltd */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/u64_stats_sync.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/skbuff.h> #include <linux/dma-mapping.h> #include <linux/prefetch.h> #include <linux/cpumask.h> #include <linux/if_vlan.h> #include <asm/barrier.h> #include "hinic_common.h" #include "hinic_hw_if.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_qp.h" #include "hinic_hw_dev.h" #include "hinic_rx.h" #include "hinic_dev.h" #define RX_IRQ_NO_PENDING 0 #define RX_IRQ_NO_COALESC 0 #define RX_IRQ_NO_LLI_TIMER 0 #define RX_IRQ_NO_CREDIT 0 #define RX_IRQ_NO_RESEND_TIMER 0 #define HINIC_RX_BUFFER_WRITE 16 #define HINIC_RX_IPV6_PKT 7 #define LRO_PKT_HDR_LEN_IPV4 66 #define LRO_PKT_HDR_LEN_IPV6 86 #define LRO_REPLENISH_THLD 256 #define LRO_PKT_HDR_LEN(cqe) \ (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \ HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) /** * hinic_rxq_clean_stats - Clean the statistics of specific queue * @rxq: Logical Rx Queue **/ static void hinic_rxq_clean_stats(struct hinic_rxq *rxq) { struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; u64_stats_update_begin(&rxq_stats->syncp); rxq_stats->pkts = 0; rxq_stats->bytes = 0; rxq_stats->errors = 0; rxq_stats->csum_errors = 0; rxq_stats->other_errors = 0; u64_stats_update_end(&rxq_stats->syncp); } /** * hinic_rxq_get_stats - get statistics of Rx Queue * @rxq: Logical Rx Queue * @stats: return updated stats here **/ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) { struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; unsigned int start; do { start = u64_stats_fetch_begin(&rxq_stats->syncp); stats->pkts = rxq_stats->pkts; stats->bytes = rxq_stats->bytes; stats->errors = rxq_stats->csum_errors + rxq_stats->other_errors; stats->csum_errors = rxq_stats->csum_errors; stats->other_errors = rxq_stats->other_errors; } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); } /** * rxq_stats_init - Initialize the statistics of specific queue * @rxq: Logical Rx Queue **/ static void rxq_stats_init(struct hinic_rxq *rxq) { struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; u64_stats_init(&rxq_stats->syncp); hinic_rxq_clean_stats(rxq); } static void rx_csum(struct hinic_rxq *rxq, u32 status, struct sk_buff *skb) { struct net_device *netdev = rxq->netdev; u32 csum_err; csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR); if (!(netdev->features & NETIF_F_RXCSUM)) return; if (!csum_err) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else { if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE | HINIC_RX_CSUM_IPSU_OTHER_ERR))) rxq->rxq_stats.csum_errors++; skb->ip_summed = CHECKSUM_NONE; } } /** * rx_alloc_skb - allocate skb and map it to dma address * @rxq: rx queue * @dma_addr: returned dma address for the skb * * Return skb **/ static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq, dma_addr_t *dma_addr) { struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct sk_buff *skb; dma_addr_t addr; int err; skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz); if (!skb) return NULL; addr = dma_map_single(&pdev->dev, skb->data, 
rxq->rq->buf_sz, DMA_FROM_DEVICE); err = dma_mapping_error(&pdev->dev, addr); if (err) { dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err); goto err_rx_map; } *dma_addr = addr; return skb; err_rx_map: dev_kfree_skb_any(skb); return NULL; } /** * rx_unmap_skb - unmap the dma address of the skb * @rxq: rx queue * @dma_addr: dma address of the skb **/ static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr) { struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz, DMA_FROM_DEVICE); } /** * rx_free_skb - unmap and free skb * @rxq: rx queue * @skb: skb to free * @dma_addr: dma address of the skb **/ static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb, dma_addr_t dma_addr) { rx_unmap_skb(rxq, dma_addr); dev_kfree_skb_any(skb); } /** * rx_alloc_pkts - allocate pkts in rx queue * @rxq: rx queue * * Return number of skbs allocated **/ static int rx_alloc_pkts(struct hinic_rxq *rxq) { struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); struct hinic_rq_wqe *rq_wqe; unsigned int free_wqebbs; struct hinic_sge sge; dma_addr_t dma_addr; struct sk_buff *skb; u16 prod_idx; int i; free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq); /* Limit the allocation chunks */ if (free_wqebbs > nic_dev->rx_weight) free_wqebbs = nic_dev->rx_weight; for (i = 0; i < free_wqebbs; i++) { skb = rx_alloc_skb(rxq, &dma_addr); if (!skb) goto skb_out; hinic_set_sge(&sge, dma_addr, skb->len); rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &prod_idx); if (!rq_wqe) { rx_free_skb(rxq, skb, dma_addr); goto skb_out; } hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge); hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb); } skb_out: if (i) { wmb(); /* write all the wqes before update PI */ hinic_rq_update(rxq->rq, prod_idx); } return i; } /** * free_all_rx_skbs - free all skbs in rx queue * @rxq: rx queue **/ static void free_all_rx_skbs(struct hinic_rxq *rxq) { struct hinic_rq *rq = rxq->rq; struct hinic_hw_wqe *hw_wqe; struct hinic_sge sge; u16 ci; while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) { if (IS_ERR(hw_wqe)) break; hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge); hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE); rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge)); } } /** * rx_recv_jumbo_pkt - Rx handler for jumbo pkt * @rxq: rx queue * @head_skb: the first skb in the list * @left_pkt_len: left size of the pkt exclude head skb * @ci: consumer index * * Return number of wqes that used for the left of the pkt **/ static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb, unsigned int left_pkt_len, u16 ci) { struct sk_buff *skb, *curr_skb = head_skb; struct hinic_rq_wqe *rq_wqe; unsigned int curr_len; struct hinic_sge sge; int num_wqes = 0; while (left_pkt_len > 0) { rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb, &ci); num_wqes++; hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); prefetch(skb->data); curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? 
HINIC_RX_BUF_SZ : left_pkt_len; left_pkt_len -= curr_len; __skb_put(skb, curr_len); if (curr_skb == head_skb) skb_shinfo(head_skb)->frag_list = skb; else curr_skb->next = skb; head_skb->len += skb->len; head_skb->data_len += skb->len; head_skb->truesize += skb->truesize; curr_skb = skb; } return num_wqes; } static void hinic_copy_lp_data(struct hinic_dev *nic_dev, struct sk_buff *skb) { struct net_device *netdev = nic_dev->netdev; u8 *lb_buf = nic_dev->lb_test_rx_buf; int lb_len = nic_dev->lb_pkt_len; int pkt_offset, frag_len, i; void *frag_data = NULL; if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) { nic_dev->lb_test_rx_idx = 0; netif_warn(nic_dev, drv, netdev, "Loopback test warning, receive too more test pkts\n"); } if (skb->len != nic_dev->lb_pkt_len) { netif_warn(nic_dev, drv, netdev, "Wrong packet length\n"); nic_dev->lb_test_rx_idx++; return; } pkt_offset = nic_dev->lb_test_rx_idx * lb_len; frag_len = (int)skb_headlen(skb); memcpy(lb_buf + pkt_offset, skb->data, frag_len); pkt_offset += frag_len; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); memcpy((lb_buf + pkt_offset), frag_data, frag_len); pkt_offset += frag_len; } nic_dev->lb_test_rx_idx++; } /** * rxq_recv - Rx handler * @rxq: rx queue * @budget: maximum pkts to process * * Return number of pkts received **/ static int rxq_recv(struct hinic_rxq *rxq, int budget) { struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq); struct net_device *netdev = rxq->netdev; u64 pkt_len = 0, rx_bytes = 0; struct hinic_rq *rq = rxq->rq; struct hinic_rq_wqe *rq_wqe; struct hinic_dev *nic_dev; unsigned int free_wqebbs; struct hinic_rq_cqe *cqe; int num_wqes, pkts = 0; struct hinic_sge sge; unsigned int status; struct sk_buff *skb; u32 offload_type; u16 ci, num_lro; u16 num_wqe = 0; u32 vlan_len; u16 vid; nic_dev = netdev_priv(netdev); while (pkts < budget) { num_wqes = 0; rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb, &ci); if (!rq_wqe) break; /* make sure we read rx_done before packet length */ dma_rmb(); cqe = rq->cqe[ci]; status = be32_to_cpu(cqe->status); hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); rx_csum(rxq, status, skb); prefetch(skb->data); pkt_len = sge.len; if (pkt_len <= HINIC_RX_BUF_SZ) { __skb_put(skb, pkt_len); } else { __skb_put(skb, HINIC_RX_BUF_SZ); num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len - HINIC_RX_BUF_SZ, ci); } hinic_rq_put_wqe(rq, ci, (num_wqes + 1) * HINIC_RQ_WQE_SIZE); offload_type = be32_to_cpu(cqe->offload_type); vlan_len = be32_to_cpu(cqe->len); if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { vid = HINIC_GET_RX_VLAN_TAG(vlan_len); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } if (unlikely(nic_dev->flags & HINIC_LP_TEST)) hinic_copy_lp_data(nic_dev, skb); skb_record_rx_queue(skb, qp->q_id); skb->protocol = eth_type_trans(skb, rxq->netdev); napi_gro_receive(&rxq->napi, skb); pkts++; rx_bytes += pkt_len; num_lro = HINIC_GET_RX_NUM_LRO(status); if (num_lro) { rx_bytes += ((num_lro - 1) * LRO_PKT_HDR_LEN(cqe)); num_wqe += (u16)(pkt_len >> rxq->rx_buff_shift) + ((pkt_len & (rxq->buf_len - 1)) ? 
1 : 0); } cqe->status = 0; if (num_wqe >= LRO_REPLENISH_THLD) break; } free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq); if (free_wqebbs > HINIC_RX_BUFFER_WRITE) rx_alloc_pkts(rxq); u64_stats_update_begin(&rxq->rxq_stats.syncp); rxq->rxq_stats.pkts += pkts; rxq->rxq_stats.bytes += rx_bytes; u64_stats_update_end(&rxq->rxq_stats.syncp); return pkts; } static int rx_poll(struct napi_struct *napi, int budget) { struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi); struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); struct hinic_rq *rq = rxq->rq; int pkts; pkts = rxq_recv(rxq, budget); if (pkts >= budget) return budget; napi_complete(napi); if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) hinic_hwdev_set_msix_state(nic_dev->hwdev, rq->msix_entry, HINIC_MSIX_ENABLE); return pkts; } static void rx_add_napi(struct hinic_rxq *rxq) { struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight); napi_enable(&rxq->napi); } static void rx_del_napi(struct hinic_rxq *rxq) { napi_disable(&rxq->napi); netif_napi_del(&rxq->napi); } static irqreturn_t rx_irq(int irq, void *data) { struct hinic_rxq *rxq = (struct hinic_rxq *)data; struct hinic_rq *rq = rxq->rq; struct hinic_dev *nic_dev; /* Disable the interrupt until napi will be completed */ nic_dev = netdev_priv(rxq->netdev); if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) hinic_hwdev_set_msix_state(nic_dev->hwdev, rq->msix_entry, HINIC_MSIX_DISABLE); nic_dev = netdev_priv(rxq->netdev); hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry); napi_schedule(&rxq->napi); return IRQ_HANDLED; } static int rx_request_irq(struct hinic_rxq *rxq) { struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); struct hinic_msix_config interrupt_info = {0}; struct hinic_intr_coal_info *intr_coal = NULL; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_rq *rq = rxq->rq; struct hinic_qp *qp; int err; qp = container_of(rq, struct hinic_qp, rq); rx_add_napi(rxq); hinic_hwdev_msix_set(hwdev, rq->msix_entry, RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC, RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT, RX_IRQ_NO_RESEND_TIMER); intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id]; interrupt_info.msix_index = rq->msix_entry; interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg; interrupt_info.pending_cnt = intr_coal->pending_limt; interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg; err = hinic_set_interrupt_cfg(hwdev, &interrupt_info); if (err) { netif_err(nic_dev, drv, rxq->netdev, "Failed to set RX interrupt coalescing attribute\n"); goto err_req_irq; } err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq); if (err) goto err_req_irq; cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask); err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask); if (err) goto err_irq_affinity; return 0; err_irq_affinity: free_irq(rq->irq, rxq); err_req_irq: rx_del_napi(rxq); return err; } static void rx_free_irq(struct hinic_rxq *rxq) { struct hinic_rq *rq = rxq->rq; irq_update_affinity_hint(rq->irq, NULL); free_irq(rq->irq, rxq); rx_del_napi(rxq); } /** * hinic_init_rxq - Initialize the Rx Queue * @rxq: Logical Rx Queue * @rq: Hardware Rx Queue to connect the Logical queue with * @netdev: network device to connect the Logical queue with * * Return 0 - Success, negative - Failure **/ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, struct net_device *netdev) { struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq); int err, pkts; rxq->netdev = netdev; rxq->rq = rq; rxq->buf_len = 
HINIC_RX_BUF_SZ; rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ); rxq_stats_init(rxq); rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL, "%s_rxq%d", netdev->name, qp->q_id); if (!rxq->irq_name) return -ENOMEM; pkts = rx_alloc_pkts(rxq); if (!pkts) { err = -ENOMEM; goto err_rx_pkts; } err = rx_request_irq(rxq); if (err) { netdev_err(netdev, "Failed to request Rx irq\n"); goto err_req_rx_irq; } return 0; err_req_rx_irq: err_rx_pkts: free_all_rx_skbs(rxq); devm_kfree(&netdev->dev, rxq->irq_name); return err; } /** * hinic_clean_rxq - Clean the Rx Queue * @rxq: Logical Rx Queue **/ void hinic_clean_rxq(struct hinic_rxq *rxq) { struct net_device *netdev = rxq->netdev; rx_free_irq(rxq); free_all_rx_skbs(rxq); devm_kfree(&netdev->dev, rxq->irq_name); }
linux-master
drivers/net/ethernet/huawei/hinic/hinic_rx.c
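In hinic_rx.c above, rx_alloc_pkts() refills the receive ring by writing up to rx_weight WQEs and only then, after a write barrier, publishing the new producer index. The sketch below shows the same publish-once pattern on a toy power-of-two ring; it is illustrative userspace C, not HiNIC API, and the C11 release fence merely stands in for the driver's wmb().

/*
 * Standalone illustration only -- not HiNIC API.  rx_alloc_pkts() above fills
 * at most rx_weight free RQ entries and then publishes the new producer index
 * once, after a write barrier.  This sketch shows the same publish-once
 * pattern on a toy power-of-two ring.
 */
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 8	/* must be a power of two */

struct ring {
	unsigned int entries[RING_SIZE];
	unsigned int prod;	/* free-running producer index */
	unsigned int cons;	/* free-running consumer index */
};

static unsigned int ring_free(const struct ring *r)
{
	return RING_SIZE - (r->prod - r->cons);
}

static unsigned int refill(struct ring *r, unsigned int weight)
{
	unsigned int free_slots = ring_free(r);
	unsigned int i;

	if (free_slots > weight)	/* cap the chunk, like nic_dev->rx_weight */
		free_slots = weight;

	for (i = 0; i < free_slots; i++)
		r->entries[(r->prod + i) & (RING_SIZE - 1)] = 0xbeef + i;

	if (i) {
		/* make the entries visible before the index update (wmb() in the driver) */
		atomic_thread_fence(memory_order_release);
		r->prod += i;
	}
	return i;
}

int main(void)
{
	struct ring r = { .prod = 0, .cons = 0 };
	unsigned int n;

	n = refill(&r, 4);
	printf("refilled %u slots, prod=%u\n", n, r.prod);
	n = refill(&r, 16);
	printf("refilled %u slots, prod=%u\n", n, r.prod);
	return 0;
}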
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved. */ #include "ena_eth_com.h" static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( struct ena_com_io_cq *io_cq) { struct ena_eth_io_rx_cdesc_base *cdesc; u16 expected_phase, head_masked; u16 desc_phase; head_masked = io_cq->head & (io_cq->q_depth - 1); expected_phase = io_cq->phase; cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr + (head_masked * io_cq->cdesc_entry_size_in_bytes)); desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; if (desc_phase != expected_phase) return NULL; /* Make sure we read the rest of the descriptor after the phase bit * has been read */ dma_rmb(); return cdesc; } static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq) { u16 tail_masked; u32 offset; tail_masked = io_sq->tail & (io_sq->q_depth - 1); offset = tail_masked * io_sq->desc_entry_size; return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); } static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, u8 *bounce_buffer) { struct ena_com_llq_info *llq_info = &io_sq->llq_info; u16 dst_tail_mask; u32 dst_offset; dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); dst_offset = dst_tail_mask * llq_info->desc_list_entry_size; if (is_llq_max_tx_burst_exists(io_sq)) { if (unlikely(!io_sq->entries_in_tx_burst_left)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Error: trying to send more packets than tx burst allows\n"); return -ENOSPC; } io_sq->entries_in_tx_burst_left--; netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid, io_sq->entries_in_tx_burst_left); } /* Make sure everything was written into the bounce buffer before * writing the bounce buffer to the device */ wmb(); /* The line is completed. 
Copy it to dev */ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer, (llq_info->desc_list_entry_size) / 8); io_sq->tail++; /* Switch phase bit in case of wrap around */ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) io_sq->phase ^= 1; return 0; } static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, u8 *header_src, u16 header_len) { struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; struct ena_com_llq_info *llq_info = &io_sq->llq_info; u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf; u16 header_offset; if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) return 0; header_offset = llq_info->descs_num_before_header * io_sq->desc_entry_size; if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Trying to write header larger than llq entry can accommodate\n"); return -EFAULT; } if (unlikely(!bounce_buffer)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n"); return -EFAULT; } memcpy(bounce_buffer + header_offset, header_src, header_len); return 0; } static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) { struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; u8 *bounce_buffer; void *sq_desc; bounce_buffer = pkt_ctrl->curr_bounce_buf; if (unlikely(!bounce_buffer)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n"); return NULL; } sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size; pkt_ctrl->idx++; pkt_ctrl->descs_left_in_line--; return sq_desc; } static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq) { struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; struct ena_com_llq_info *llq_info = &io_sq->llq_info; int rc; if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) return 0; /* bounce buffer was used, so write it and get a new one */ if (likely(pkt_ctrl->idx)) { rc = ena_com_write_bounce_buffer_to_dev(io_sq, pkt_ctrl->curr_bounce_buf); if (unlikely(rc)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Failed to write bounce buffer to device\n"); return rc; } pkt_ctrl->curr_bounce_buf = ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); memset(io_sq->llq_buf_ctrl.curr_bounce_buf, 0x0, llq_info->desc_list_entry_size); } pkt_ctrl->idx = 0; pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header; return 0; } static void *get_sq_desc(struct ena_com_io_sq *io_sq) { if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) return get_sq_desc_llq(io_sq); return get_sq_desc_regular_queue(io_sq); } static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq) { struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; struct ena_com_llq_info *llq_info = &io_sq->llq_info; int rc; if (!pkt_ctrl->descs_left_in_line) { rc = ena_com_write_bounce_buffer_to_dev(io_sq, pkt_ctrl->curr_bounce_buf); if (unlikely(rc)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Failed to write bounce buffer to device\n"); return rc; } pkt_ctrl->curr_bounce_buf = ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); memset(io_sq->llq_buf_ctrl.curr_bounce_buf, 0x0, llq_info->desc_list_entry_size); pkt_ctrl->idx = 0; if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)) pkt_ctrl->descs_left_in_line = 1; else pkt_ctrl->descs_left_in_line = llq_info->desc_list_entry_size / io_sq->desc_entry_size; } return 0; } static int ena_com_sq_update_tail(struct ena_com_io_sq 
*io_sq) { if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) return ena_com_sq_update_llq_tail(io_sq); io_sq->tail++; /* Switch phase bit in case of wrap around */ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) io_sq->phase ^= 1; return 0; } static struct ena_eth_io_rx_cdesc_base * ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx) { idx &= (io_cq->q_depth - 1); return (struct ena_eth_io_rx_cdesc_base *) ((uintptr_t)io_cq->cdesc_addr.virt_addr + idx * io_cq->cdesc_entry_size_in_bytes); } static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, u16 *first_cdesc_idx) { struct ena_eth_io_rx_cdesc_base *cdesc; u16 count = 0, head_masked; u32 last = 0; do { cdesc = ena_com_get_next_rx_cdesc(io_cq); if (!cdesc) break; ena_com_cq_inc_head(io_cq); count++; last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; } while (!last); if (last) { *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx; count += io_cq->cur_rx_pkt_cdesc_count; head_masked = io_cq->head & (io_cq->q_depth - 1); io_cq->cur_rx_pkt_cdesc_count = 0; io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "ENA q_id: %d packets were completed. first desc idx %u descs# %d\n", io_cq->qid, *first_cdesc_idx, count); } else { io_cq->cur_rx_pkt_cdesc_count += count; count = 0; } return count; } static int ena_com_create_meta(struct ena_com_io_sq *io_sq, struct ena_com_tx_meta *ena_meta) { struct ena_eth_io_tx_meta_desc *meta_desc = NULL; meta_desc = get_sq_desc(io_sq); if (unlikely(!meta_desc)) return -EFAULT; memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc)); meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; /* bits 0-9 of the mss */ meta_desc->word2 |= ((u32)ena_meta->mss << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; /* bits 10-13 of the mss */ meta_desc->len_ctrl |= ((ena_meta->mss >> 10) << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK; /* Extended meta desc */ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; meta_desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK; meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK; meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; meta_desc->word2 |= ena_meta->l3_hdr_len & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; meta_desc->word2 |= (ena_meta->l3_hdr_offset << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK; meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; return ena_com_sq_update_tail(io_sq); } static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, struct ena_com_tx_ctx *ena_tx_ctx, bool *have_meta) { struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; /* When disable meta caching is set, don't bother to save the meta and * compare it to the stored version, just create the meta */ if (io_sq->disable_meta_caching) { if (unlikely(!ena_tx_ctx->meta_valid)) return -EINVAL; *have_meta = true; return ena_com_create_meta(io_sq, ena_meta); } if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) { *have_meta = true; /* Cache the meta desc */ memcpy(&io_sq->cached_tx_meta, ena_meta, sizeof(struct ena_com_tx_meta)); return ena_com_create_meta(io_sq, 
ena_meta); } *have_meta = false; return 0; } static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq, struct ena_com_rx_ctx *ena_rx_ctx, struct ena_eth_io_rx_cdesc_base *cdesc) { ena_rx_ctx->l3_proto = cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK; ena_rx_ctx->l4_proto = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; ena_rx_ctx->l3_csum_err = !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT); ena_rx_ctx->l4_csum_err = !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT); ena_rx_ctx->l4_csum_checked = !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT); ena_rx_ctx->hash = cdesc->hash; ena_rx_ctx->frag = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n", ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status); } /*****************************************************************************/ /***************************** API **********************************/ /*****************************************************************************/ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, struct ena_com_tx_ctx *ena_tx_ctx, int *nb_hw_desc) { struct ena_eth_io_tx_desc *desc = NULL; struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs; void *buffer_to_push = ena_tx_ctx->push_header; u16 header_len = ena_tx_ctx->header_len; u16 num_bufs = ena_tx_ctx->num_bufs; u16 start_tail = io_sq->tail; int i, rc; bool have_meta; u64 addr_hi; WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type"); /* num_bufs +1 for potential meta desc */ if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Not enough space in the tx queue\n"); return -ENOMEM; } if (unlikely(header_len > io_sq->tx_max_header_size)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Header size is too large %d max header: %d\n", header_len, io_sq->tx_max_header_size); return -EINVAL; } if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Push header wasn't provided in LLQ mode\n"); return -EINVAL; } rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len); if (unlikely(rc)) return rc; rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta); if (unlikely(rc)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Failed to create and store tx meta desc\n"); return rc; } /* If the caller doesn't want to send packets */ if (unlikely(!num_bufs && !header_len)) { rc = ena_com_close_bounce_buffer(io_sq); if (rc) netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Failed to write buffers to LLQ\n"); *nb_hw_desc = io_sq->tail - start_tail; return rc; } desc = get_sq_desc(io_sq); if (unlikely(!desc)) return -EFAULT; memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); /* Set first desc when we don't have meta descriptor */ if (!have_meta) desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK; desc->buff_addr_hi_hdr_sz |= ((u32)header_len << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & 
ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK; desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK; desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK; /* Bits 0-9 */ desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; desc->meta_ctrl |= (ena_tx_ctx->df << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK; /* Bits 10-15 */ desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK; if (ena_tx_ctx->meta_valid) { desc->meta_ctrl |= (ena_tx_ctx->tso_enable << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK; desc->meta_ctrl |= ena_tx_ctx->l3_proto & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK; desc->meta_ctrl |= (ena_tx_ctx->l4_proto << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK; desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK; desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK; desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK; } for (i = 0; i < num_bufs; i++) { /* The first desc share the same desc as the header */ if (likely(i != 0)) { rc = ena_com_sq_update_tail(io_sq); if (unlikely(rc)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Failed to update sq tail\n"); return rc; } desc = get_sq_desc(io_sq); if (unlikely(!desc)) return -EFAULT; memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK; } desc->len_ctrl |= ena_bufs->len & ENA_ETH_IO_TX_DESC_LENGTH_MASK; addr_hi = ((ena_bufs->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); desc->buff_addr_lo = (u32)ena_bufs->paddr; desc->buff_addr_hi_hdr_sz |= addr_hi & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK; ena_bufs++; } /* set the last desc indicator */ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK; rc = ena_com_sq_update_tail(io_sq); if (unlikely(rc)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Failed to update sq tail of the last descriptor\n"); return rc; } rc = ena_com_close_bounce_buffer(io_sq); *nb_hw_desc = io_sq->tail - start_tail; return rc; } int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, struct ena_com_io_sq *io_sq, struct ena_com_rx_ctx *ena_rx_ctx) { struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0]; struct ena_eth_io_rx_cdesc_base *cdesc = NULL; u16 q_depth = io_cq->q_depth; u16 cdesc_idx = 0; u16 nb_hw_desc; u16 i = 0; WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx); if (nb_hw_desc == 0) { ena_rx_ctx->descs = nb_hw_desc; return 0; } netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc); if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) { netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs); return -ENOSPC; } cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx); ena_rx_ctx->pkt_offset = cdesc->offset; do { ena_buf[i].len = cdesc->length; ena_buf[i].req_id = cdesc->req_id; if (unlikely(ena_buf[i].req_id >= q_depth)) return -EIO; if (++i >= nb_hw_desc) break; cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, 
cdesc_idx + i); } while (1); /* Update SQ head ptr */ io_sq->next_to_comp += nb_hw_desc; netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid, io_sq->next_to_comp); /* Get rx flags from the last pkt */ ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc); ena_rx_ctx->descs = nb_hw_desc; return 0; } int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, struct ena_com_buf *ena_buf, u16 req_id) { struct ena_eth_io_rx_desc *desc; WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1))) return -ENOSPC; desc = get_sq_desc(io_sq); if (unlikely(!desc)) return -EFAULT; memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); desc->length = ena_buf->len; desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK | ENA_ETH_IO_RX_DESC_LAST_MASK | ENA_ETH_IO_RX_DESC_COMP_REQ_MASK | (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK); desc->req_id = req_id; netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid, req_id); desc->buff_addr_lo = (u32)ena_buf->paddr; desc->buff_addr_hi = ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); return ena_com_sq_update_tail(io_sq); } bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) { struct ena_eth_io_rx_cdesc_base *cdesc; cdesc = ena_com_get_next_rx_cdesc(io_cq); if (cdesc) return false; else return true; }
linux-master
drivers/net/ethernet/amazon/ena/ena_eth_com.c
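ena_eth_com.c above consumes completion descriptors with a phase-bit handshake: ena_com_get_next_rx_cdesc() accepts an entry only when its phase bit matches the expected phase, and the expectation is flipped when the head index wraps (via ena_com_cq_inc_head(), declared elsewhere). The sketch below reproduces that mechanism in plain userspace C; the queue depth and descriptor layout are invented for the example and are not the ENA descriptor format.

/*
 * Standalone illustration only -- not the ENA API.  Reproduces the phase-bit
 * handshake used by ena_com_get_next_rx_cdesc()/ena_com_cq_inc_head() above:
 * the consumer accepts an entry only if its phase bit matches the expected
 * phase, and flips the expectation each time the head wraps, so entries left
 * over from the previous lap are never consumed.
 */
#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH 4	/* power of two, like io_cq->q_depth */

struct cdesc {
	uint8_t phase;
	uint32_t data;
};

struct cq {
	struct cdesc ring[Q_DEPTH];
	uint16_t head;	/* free-running consumer index */
	uint8_t phase;	/* expected phase, starts at 1 as in ena_com_init_io_cq() */
};

static const struct cdesc *cq_next(struct cq *cq)
{
	const struct cdesc *d = &cq->ring[cq->head & (Q_DEPTH - 1)];

	if (d->phase != cq->phase)	/* not yet written for this lap */
		return NULL;

	cq->head++;
	if ((cq->head & (Q_DEPTH - 1)) == 0)
		cq->phase ^= 1;	/* wrapped: expect the opposite phase now */
	return d;
}

int main(void)
{
	struct cq cq = { .head = 0, .phase = 1 };
	const struct cdesc *d;
	int i;

	/* the "device" fills the first lap with phase 1 */
	for (i = 0; i < Q_DEPTH; i++)
		cq.ring[i] = (struct cdesc){ .phase = 1, .data = 100 + i };

	while ((d = cq_next(&cq)))
		printf("completed data=%u\n", (unsigned)d->data);

	/* the next lap must be written with phase 0 to be seen */
	printf("expected phase after wrap: %u\n", (unsigned)cq.phase);
	return 0;
}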
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved. */ #include "ena_com.h" /*****************************************************************************/ /*****************************************************************************/ /* Timeout in micro-sec */ #define ADMIN_CMD_TIMEOUT_US (3000000) #define ENA_ASYNC_QUEUE_DEPTH 16 #define ENA_ADMIN_QUEUE_DEPTH 32 #define ENA_CTRL_MAJOR 0 #define ENA_CTRL_MINOR 0 #define ENA_CTRL_SUB_MINOR 1 #define MIN_ENA_CTRL_VER \ (((ENA_CTRL_MAJOR) << \ (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ ((ENA_CTRL_MINOR) << \ (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ (ENA_CTRL_SUB_MINOR)) #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 #define ENA_REGS_ADMIN_INTR_MASK 1 #define ENA_MAX_BACKOFF_DELAY_EXP 16U #define ENA_MIN_ADMIN_POLL_US 100 #define ENA_MAX_ADMIN_POLL_US 5000 /*****************************************************************************/ /*****************************************************************************/ /*****************************************************************************/ enum ena_cmd_status { ENA_CMD_SUBMITTED, ENA_CMD_COMPLETED, /* Abort - canceled by the driver */ ENA_CMD_ABORTED, }; struct ena_comp_ctx { struct completion wait_event; struct ena_admin_acq_entry *user_cqe; u32 comp_size; enum ena_cmd_status status; /* status from the device */ u8 comp_status; u8 cmd_opcode; bool occupied; }; struct ena_com_stats_ctx { struct ena_admin_aq_get_stats_cmd get_cmd; struct ena_admin_acq_get_stats_resp get_resp; }; static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, struct ena_common_mem_addr *ena_addr, dma_addr_t addr) { if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { netdev_err(ena_dev->net_device, "DMA address has more bits that the device supports\n"); return -EINVAL; } ena_addr->mem_addr_low = lower_32_bits(addr); ena_addr->mem_addr_high = (u16)upper_32_bits(addr); return 0; } static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) { struct ena_com_dev *ena_dev = admin_queue->ena_dev; struct ena_com_admin_sq *sq = &admin_queue->sq; u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL); if (!sq->entries) { netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } sq->head = 0; sq->tail = 0; sq->phase = 1; sq->db_addr = NULL; return 0; } static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue) { struct ena_com_dev *ena_dev = admin_queue->ena_dev; struct ena_com_admin_cq *cq = &admin_queue->cq; u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL); if (!cq->entries) { netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } cq->head = 0; cq->phase = 1; return 0; } static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, struct ena_aenq_handlers *aenq_handlers) { struct ena_com_aenq *aenq = &ena_dev->aenq; u32 addr_low, addr_high, aenq_caps; u16 size; ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL); if (!aenq->entries) { netdev_err(ena_dev->net_device, 
"Memory allocation failed\n"); return -ENOMEM; } aenq->head = aenq->q_depth; aenq->phase = 1; addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); aenq_caps = 0; aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; aenq_caps |= (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); if (unlikely(!aenq_handlers)) { netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n"); return -EINVAL; } aenq->aenq_handlers = aenq_handlers; return 0; } static void comp_ctxt_release(struct ena_com_admin_queue *queue, struct ena_comp_ctx *comp_ctx) { comp_ctx->occupied = false; atomic_dec(&queue->outstanding_cmds); } static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue, u16 command_id, bool capture) { if (unlikely(command_id >= admin_queue->q_depth)) { netdev_err(admin_queue->ena_dev->net_device, "Command id is larger than the queue size. cmd_id: %u queue size %d\n", command_id, admin_queue->q_depth); return NULL; } if (unlikely(!admin_queue->comp_ctx)) { netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n"); return NULL; } if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) { netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n"); return NULL; } if (capture) { atomic_inc(&admin_queue->outstanding_cmds); admin_queue->comp_ctx[command_id].occupied = true; } return &admin_queue->comp_ctx[command_id]; } static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, struct ena_admin_aq_entry *cmd, size_t cmd_size_in_bytes, struct ena_admin_acq_entry *comp, size_t comp_size_in_bytes) { struct ena_comp_ctx *comp_ctx; u16 tail_masked, cmd_id; u16 queue_size_mask; u16 cnt; queue_size_mask = admin_queue->q_depth - 1; tail_masked = admin_queue->sq.tail & queue_size_mask; /* In case of queue FULL */ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n"); admin_queue->stats.out_of_space++; return ERR_PTR(-ENOSPC); } cmd_id = admin_queue->curr_cmd_id; cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; cmd->aq_common_descriptor.command_id |= cmd_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); if (unlikely(!comp_ctx)) return ERR_PTR(-EINVAL); comp_ctx->status = ENA_CMD_SUBMITTED; comp_ctx->comp_size = (u32)comp_size_in_bytes; comp_ctx->user_cqe = comp; comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; reinit_completion(&comp_ctx->wait_event); memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & queue_size_mask; admin_queue->sq.tail++; admin_queue->stats.submitted_cmd++; if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) admin_queue->sq.phase = !admin_queue->sq.phase; writel(admin_queue->sq.tail, admin_queue->sq.db_addr); return comp_ctx; } static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue) { struct ena_com_dev *ena_dev = admin_queue->ena_dev; size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx); struct ena_comp_ctx *comp_ctx; 
u16 i; admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL); if (unlikely(!admin_queue->comp_ctx)) { netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } for (i = 0; i < admin_queue->q_depth; i++) { comp_ctx = get_comp_ctxt(admin_queue, i, false); if (comp_ctx) init_completion(&comp_ctx->wait_event); } return 0; } static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, struct ena_admin_aq_entry *cmd, size_t cmd_size_in_bytes, struct ena_admin_acq_entry *comp, size_t comp_size_in_bytes) { unsigned long flags = 0; struct ena_comp_ctx *comp_ctx; spin_lock_irqsave(&admin_queue->q_lock, flags); if (unlikely(!admin_queue->running_state)) { spin_unlock_irqrestore(&admin_queue->q_lock, flags); return ERR_PTR(-ENODEV); } comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size_in_bytes, comp, comp_size_in_bytes); if (IS_ERR(comp_ctx)) admin_queue->running_state = false; spin_unlock_irqrestore(&admin_queue->q_lock, flags); return comp_ctx; } static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, struct ena_com_create_io_ctx *ctx, struct ena_com_io_sq *io_sq) { size_t size; int dev_node = 0; memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; io_sq->desc_entry_size = (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? sizeof(struct ena_eth_io_tx_desc) : sizeof(struct ena_eth_io_rx_desc); size = io_sq->desc_entry_size * io_sq->q_depth; if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); io_sq->desc_addr.virt_addr = dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr, GFP_KERNEL); set_dev_node(ena_dev->dmadev, dev_node); if (!io_sq->desc_addr.virt_addr) { io_sq->desc_addr.virt_addr = dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr, GFP_KERNEL); } if (!io_sq->desc_addr.virt_addr) { netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } } if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { /* Allocate bounce buffers */ io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size; io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; io_sq->bounce_buf_ctrl.next_to_use = 0; size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num; dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); set_dev_node(ena_dev->dmadev, dev_node); if (!io_sq->bounce_buf_ctrl.base_buffer) io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); if (!io_sq->bounce_buf_ctrl.base_buffer) { netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n"); return -ENOMEM; } memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info)); /* Initiate the first bounce buffer */ io_sq->llq_buf_ctrl.curr_bounce_buf = ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); memset(io_sq->llq_buf_ctrl.curr_bounce_buf, 0x0, io_sq->llq_info.desc_list_entry_size); io_sq->llq_buf_ctrl.descs_left_in_line = io_sq->llq_info.descs_num_before_header; io_sq->disable_meta_caching = io_sq->llq_info.disable_meta_caching; if (io_sq->llq_info.max_entries_in_tx_burst > 0) io_sq->entries_in_tx_burst_left = io_sq->llq_info.max_entries_in_tx_burst; } io_sq->tail = 0; io_sq->next_to_comp = 0; io_sq->phase = 1; 
return 0; } static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, struct ena_com_create_io_ctx *ctx, struct ena_com_io_cq *io_cq) { size_t size; int prev_node = 0; memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); /* Use the basic completion descriptor for Rx */ io_cq->cdesc_entry_size_in_bytes = (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? sizeof(struct ena_eth_io_tx_cdesc) : sizeof(struct ena_eth_io_rx_cdesc_base); size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; prev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); io_cq->cdesc_addr.virt_addr = dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); set_dev_node(ena_dev->dmadev, prev_node); if (!io_cq->cdesc_addr.virt_addr) { io_cq->cdesc_addr.virt_addr = dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); } if (!io_cq->cdesc_addr.virt_addr) { netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } io_cq->phase = 1; io_cq->head = 0; return 0; } static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, struct ena_admin_acq_entry *cqe) { struct ena_comp_ctx *comp_ctx; u16 cmd_id; cmd_id = cqe->acq_common_descriptor.command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); if (unlikely(!comp_ctx)) { netdev_err(admin_queue->ena_dev->net_device, "comp_ctx is NULL. Changing the admin queue running state\n"); admin_queue->running_state = false; return; } comp_ctx->status = ENA_CMD_COMPLETED; comp_ctx->comp_status = cqe->acq_common_descriptor.status; if (comp_ctx->user_cqe) memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); if (!admin_queue->polling) complete(&comp_ctx->wait_event); } static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) { struct ena_admin_acq_entry *cqe = NULL; u16 comp_num = 0; u16 head_masked; u8 phase; head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); phase = admin_queue->cq.phase; cqe = &admin_queue->cq.entries[head_masked]; /* Go over all the completions */ while ((READ_ONCE(cqe->acq_common_descriptor.flags) & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { /* Do not read the rest of the completion entry before the * phase bit was validated */ dma_rmb(); ena_com_handle_single_admin_completion(admin_queue, cqe); head_masked++; comp_num++; if (unlikely(head_masked == admin_queue->q_depth)) { head_masked = 0; phase = !phase; } cqe = &admin_queue->cq.entries[head_masked]; } admin_queue->cq.head += comp_num; admin_queue->cq.phase = phase; admin_queue->sq.head += comp_num; admin_queue->stats.completed_cmd += comp_num; } static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, u8 comp_status) { if (unlikely(comp_status != 0)) netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n", comp_status); switch (comp_status) { case ENA_ADMIN_SUCCESS: return 0; case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: return -ENOMEM; case ENA_ADMIN_UNSUPPORTED_OPCODE: return -EOPNOTSUPP; case ENA_ADMIN_BAD_OPCODE: case ENA_ADMIN_MALFORMED_REQUEST: case ENA_ADMIN_ILLEGAL_PARAMETER: case ENA_ADMIN_UNKNOWN_ERROR: return -EINVAL; case ENA_ADMIN_RESOURCE_BUSY: return -EAGAIN; } return -EINVAL; } static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) { exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP); delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us); delay_us = min_t(u32, delay_us * (1U << exp), 
ENA_MAX_ADMIN_POLL_US); usleep_range(delay_us, 2 * delay_us); } static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { unsigned long flags = 0; unsigned long timeout; int ret; u32 exp = 0; timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); while (1) { spin_lock_irqsave(&admin_queue->q_lock, flags); ena_com_handle_admin_completion(admin_queue); spin_unlock_irqrestore(&admin_queue->q_lock, flags); if (comp_ctx->status != ENA_CMD_SUBMITTED) break; if (time_is_before_jiffies(timeout)) { netdev_err(admin_queue->ena_dev->net_device, "Wait for completion (polling) timeout\n"); /* ENA didn't have any completion */ spin_lock_irqsave(&admin_queue->q_lock, flags); admin_queue->stats.no_completion++; admin_queue->running_state = false; spin_unlock_irqrestore(&admin_queue->q_lock, flags); ret = -ETIME; goto err; } ena_delay_exponential_backoff_us(exp++, admin_queue->ena_dev->ena_min_poll_delay_us); } if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n"); spin_lock_irqsave(&admin_queue->q_lock, flags); admin_queue->stats.aborted_cmd++; spin_unlock_irqrestore(&admin_queue->q_lock, flags); ret = -ENODEV; goto err; } WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status); ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); err: comp_ctxt_release(admin_queue, comp_ctx); return ret; } /* * Set the LLQ configurations of the firmware * * The driver provides only the enabled feature values to the device, * which in turn, checks if they are supported. */ static int ena_com_set_llq(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; struct ena_com_llq_info *llq_info = &ena_dev->llq_info; int ret; memset(&cmd, 0x0, sizeof(cmd)); admin_queue = &ena_dev->admin_queue; cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; cmd.feat_common.feature_id = ENA_ADMIN_LLQ; cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; cmd.u.llq.accel_mode.u.set.enabled_flags = BIT(ENA_ADMIN_DISABLE_META_CACHING) | BIT(ENA_ADMIN_LIMIT_TX_BURST); ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&cmd, sizeof(cmd), (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret); return ret; } static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq_features, struct ena_llq_configurations *llq_default_cfg) { struct ena_com_llq_info *llq_info = &ena_dev->llq_info; struct ena_admin_accel_mode_get llq_accel_mode_get; u16 supported_feat; int rc; memset(llq_info, 0, sizeof(*llq_info)); supported_feat = llq_features->header_location_ctrl_supported; if (likely(supported_feat & llq_default_cfg->llq_header_location)) { llq_info->header_location_ctrl = llq_default_cfg->llq_header_location; } else { netdev_err(ena_dev->net_device, "Invalid header location control, supported: 0x%x\n", supported_feat); return -EINVAL; } if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { supported_feat = 
llq_features->descriptors_stride_ctrl_supported; if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; } else { if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; } else { netdev_err(ena_dev->net_device, "Invalid desc_stride_ctrl, supported: 0x%x\n", supported_feat); return -EINVAL; } netdev_err(ena_dev->net_device, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", llq_default_cfg->llq_stride_ctrl, supported_feat, llq_info->desc_stride_ctrl); } } else { llq_info->desc_stride_ctrl = 0; } supported_feat = llq_features->entry_size_ctrl_supported; if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; } else { if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; llq_info->desc_list_entry_size = 128; } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; llq_info->desc_list_entry_size = 192; } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) { llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; llq_info->desc_list_entry_size = 256; } else { netdev_err(ena_dev->net_device, "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat); return -EINVAL; } netdev_err(ena_dev->net_device, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", llq_default_cfg->llq_ring_entry_size, supported_feat, llq_info->desc_list_entry_size); } if (unlikely(llq_info->desc_list_entry_size & 0x7)) { /* The desc list entry size should be whole multiply of 8 * This requirement comes from __iowrite64_copy() */ netdev_err(ena_dev->net_device, "Illegal entry size %d\n", llq_info->desc_list_entry_size); return -EINVAL; } if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) llq_info->descs_per_entry = llq_info->desc_list_entry_size / sizeof(struct ena_eth_io_tx_desc); else llq_info->descs_per_entry = 1; supported_feat = llq_features->desc_num_before_header_supported; if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; } else { if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) { llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) { llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) { llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; } else { netdev_err(ena_dev->net_device, "Invalid descs_num_before_header, supported: 0x%x\n", supported_feat); return -EINVAL; } netdev_err(ena_dev->net_device, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", 
llq_default_cfg->llq_num_decs_before_header, supported_feat, llq_info->descs_num_before_header); } /* Check for accelerated queue supported */ llq_accel_mode_get = llq_features->accel_mode.u.get; llq_info->disable_meta_caching = !!(llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_DISABLE_META_CACHING)); if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST)) llq_info->max_entries_in_tx_burst = llq_accel_mode_get.max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value; rc = ena_com_set_llq(ena_dev); if (rc) netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc); return rc; } static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { unsigned long flags = 0; int ret; wait_for_completion_timeout(&comp_ctx->wait_event, usecs_to_jiffies( admin_queue->completion_timeout)); /* In case the command wasn't completed find out the root cause. * There might be 2 kinds of errors * 1) No completion (timeout reached) * 2) There is completion but the device didn't get any msi-x interrupt. */ if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { spin_lock_irqsave(&admin_queue->q_lock, flags); ena_com_handle_admin_completion(admin_queue); admin_queue->stats.no_completion++; spin_unlock_irqrestore(&admin_queue->q_lock, flags); if (comp_ctx->status == ENA_CMD_COMPLETED) { netdev_err(admin_queue->ena_dev->net_device, "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF"); /* Check if fallback to polling is enabled */ if (admin_queue->auto_polling) admin_queue->polling = true; } else { netdev_err(admin_queue->ena_dev->net_device, "The ena device didn't send a completion for the admin cmd %d status %d\n", comp_ctx->cmd_opcode, comp_ctx->status); } /* Check if shifted to polling mode. * This will happen if there is a completion without an interrupt * and autopolling mode is enabled. Continuing normal execution in such case */ if (!admin_queue->polling) { admin_queue->running_state = false; ret = -ETIME; goto err; } } ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); err: comp_ctxt_release(admin_queue, comp_ctx); return ret; } /* This method read the hardware device register through posting writes * and waiting for response * On timeout the function will return ENA_MMIO_READ_TIMEOUT */ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) { struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = mmio_read->read_resp; u32 mmio_read_reg, ret, i; unsigned long flags = 0; u32 timeout = mmio_read->reg_read_to; might_sleep(); if (timeout == 0) timeout = ENA_REG_READ_TIMEOUT; /* If readless is disabled, perform regular read */ if (!mmio_read->readless_supported) return readl(ena_dev->reg_bar + offset); spin_lock_irqsave(&mmio_read->lock, flags); mmio_read->seq_num++; read_resp->req_id = mmio_read->seq_num + 0xDEAD; mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) & ENA_REGS_MMIO_REG_READ_REG_OFF_MASK; mmio_read_reg |= mmio_read->seq_num & ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); for (i = 0; i < timeout; i++) { if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) break; udelay(1); } if (unlikely(i == timeout)) { netdev_err(ena_dev->net_device, "Reading reg failed for timeout. 
expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n", mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off); ret = ENA_MMIO_READ_TIMEOUT; goto err; } if (read_resp->reg_off != offset) { netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n"); ret = ENA_MMIO_READ_TIMEOUT; } else { ret = read_resp->reg_val; } err: spin_unlock_irqrestore(&mmio_read->lock, flags); return ret; } /* There are two types to wait for completion. * Polling mode - wait until the completion is available. * Async mode - wait on wait queue until the completion is ready * (or the timeout expired). * It is expected that the IRQ called ena_com_handle_admin_completion * to mark the completions. */ static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { if (admin_queue->polling) return ena_com_wait_and_process_admin_cq_polling(comp_ctx, admin_queue); return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx, admin_queue); } static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, struct ena_com_io_sq *io_sq) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_admin_aq_destroy_sq_cmd destroy_cmd; struct ena_admin_acq_destroy_sq_resp_desc destroy_resp; u8 direction; int ret; memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) direction = ENA_ADMIN_SQ_DIRECTION_TX; else direction = ENA_ADMIN_SQ_DIRECTION_RX; destroy_cmd.sq.sq_identity |= (direction << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK; destroy_cmd.sq.sq_idx = io_sq->idx; destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ; ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&destroy_cmd, sizeof(destroy_cmd), (struct ena_admin_acq_entry *)&destroy_resp, sizeof(destroy_resp)); if (unlikely(ret && (ret != -ENODEV))) netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret); return ret; } static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, struct ena_com_io_sq *io_sq, struct ena_com_io_cq *io_cq) { size_t size; if (io_cq->cdesc_addr.virt_addr) { size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr, io_cq->cdesc_addr.phys_addr); io_cq->cdesc_addr.virt_addr = NULL; } if (io_sq->desc_addr.virt_addr) { size = io_sq->desc_entry_size * io_sq->q_depth; dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr, io_sq->desc_addr.phys_addr); io_sq->desc_addr.virt_addr = NULL; } if (io_sq->bounce_buf_ctrl.base_buffer) { devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer); io_sq->bounce_buf_ctrl.base_buffer = NULL; } } static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, u16 exp_state) { u32 val, exp = 0; unsigned long timeout_stamp; /* Convert timeout from resolution of 100ms to us resolution. 
*/ timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout); while (1) { val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == exp_state) return 0; if (time_is_before_jiffies(timeout_stamp)) return -ETIME; ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); } } static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, enum ena_admin_aq_feature_id feature_id) { u32 feature_mask = 1 << feature_id; /* Device attributes is always supported */ if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && !(ena_dev->supported_features & feature_mask)) return false; return true; } static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, struct ena_admin_get_feat_resp *get_resp, enum ena_admin_aq_feature_id feature_id, dma_addr_t control_buf_dma_addr, u32 control_buff_size, u8 feature_ver) { struct ena_com_admin_queue *admin_queue; struct ena_admin_get_feat_cmd get_cmd; int ret; if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id); return -EOPNOTSUPP; } memset(&get_cmd, 0x0, sizeof(get_cmd)); admin_queue = &ena_dev->admin_queue; get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; if (control_buff_size) get_cmd.aq_common_descriptor.flags = ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; else get_cmd.aq_common_descriptor.flags = 0; ret = ena_com_mem_addr_set(ena_dev, &get_cmd.control_buffer.address, control_buf_dma_addr); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } get_cmd.control_buffer.length = control_buff_size; get_cmd.feat_common.feature_version = feature_ver; get_cmd.feat_common.feature_id = feature_id; ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *) &get_cmd, sizeof(get_cmd), (struct ena_admin_acq_entry *) get_resp, sizeof(*get_resp)); if (unlikely(ret)) netdev_err(ena_dev->net_device, "Failed to submit get_feature command %d error: %d\n", feature_id, ret); return ret; } static int ena_com_get_feature(struct ena_com_dev *ena_dev, struct ena_admin_get_feat_resp *get_resp, enum ena_admin_aq_feature_id feature_id, u8 feature_ver) { return ena_com_get_feature_ex(ena_dev, get_resp, feature_id, 0, 0, feature_ver); } int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev) { return ena_dev->rss.hash_func; } static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) { struct ena_admin_feature_rss_flow_hash_control *hash_key = (ena_dev->rss).hash_key; netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key)); /* The key buffer is stored in the device in an array of * uint32 elements. 
*/ hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS; } static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) return -EOPNOTSUPP; rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), &rss->hash_key_dma_addr, GFP_KERNEL); if (unlikely(!rss->hash_key)) return -ENOMEM; return 0; } static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; if (rss->hash_key) dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key, rss->hash_key_dma_addr); rss->hash_key = NULL; } static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), &rss->hash_ctrl_dma_addr, GFP_KERNEL); if (unlikely(!rss->hash_ctrl)) return -ENOMEM; return 0; } static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; if (rss->hash_ctrl) dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl, rss->hash_ctrl_dma_addr); rss->hash_ctrl = NULL; } static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, u16 log_size) { struct ena_rss *rss = &ena_dev->rss; struct ena_admin_get_feat_resp get_resp; size_t tbl_size; int ret; ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0); if (unlikely(ret)) return ret; if ((get_resp.u.ind_table.min_size > log_size) || (get_resp.u.ind_table.max_size < log_size)) { netdev_err(ena_dev->net_device, "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n", 1 << log_size, 1 << get_resp.u.ind_table.min_size, 1 << get_resp.u.ind_table.max_size); return -EINVAL; } tbl_size = (1ULL << log_size) * sizeof(struct ena_admin_rss_ind_table_entry); rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); if (unlikely(!rss->rss_ind_tbl)) goto mem_err1; tbl_size = (1ULL << log_size) * sizeof(u16); rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); if (unlikely(!rss->host_rss_ind_tbl)) goto mem_err2; rss->tbl_log_size = log_size; return 0; mem_err2: tbl_size = (1ULL << log_size) * sizeof(struct ena_admin_rss_ind_table_entry); dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr); rss->rss_ind_tbl = NULL; mem_err1: rss->tbl_log_size = 0; return -ENOMEM; } static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; size_t tbl_size = (1ULL << rss->tbl_log_size) * sizeof(struct ena_admin_rss_ind_table_entry); if (rss->rss_ind_tbl) dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr); rss->rss_ind_tbl = NULL; if (rss->host_rss_ind_tbl) devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl); rss->host_rss_ind_tbl = NULL; } static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, struct ena_com_io_sq *io_sq, u16 cq_idx) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_admin_aq_create_sq_cmd create_cmd; struct ena_admin_acq_create_sq_resp_desc cmd_completion; u8 direction; int ret; memset(&create_cmd, 0x0, sizeof(create_cmd)); create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) direction = ENA_ADMIN_SQ_DIRECTION_TX; else direction = ENA_ADMIN_SQ_DIRECTION_RX; 
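	/* The direction chosen above is packed, together with the placement
	 * and completion policies, into the CREATE_SQ descriptor built below.
	 */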
create_cmd.sq_identity |= (direction << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; create_cmd.sq_caps_2 |= io_sq->mem_queue_type & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; create_cmd.sq_caps_3 |= ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; create_cmd.cq_idx = cq_idx; create_cmd.sq_depth = io_sq->q_depth; if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { ret = ena_com_mem_addr_set(ena_dev, &create_cmd.sq_ba, io_sq->desc_addr.phys_addr); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } } ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&create_cmd, sizeof(create_cmd), (struct ena_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret); return ret; } io_sq->idx = cmd_completion.sq_idx; io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + (uintptr_t)cmd_completion.sq_doorbell_offset); if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + cmd_completion.llq_headers_offset); io_sq->desc_addr.pbuf_dev_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + cmd_completion.llq_descriptors_offset); } netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); return ret; } static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; struct ena_com_io_sq *io_sq; u16 qid; int i; for (i = 0; i < 1 << rss->tbl_log_size; i++) { qid = rss->host_rss_ind_tbl[i]; if (qid >= ENA_TOTAL_NUM_QUEUES) return -EINVAL; io_sq = &ena_dev->io_sq_queues[qid]; if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) return -EINVAL; rss->rss_ind_tbl[i].cq_idx = io_sq->idx; } return 0; } static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, u16 intr_delay_resolution) { u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; if (unlikely(!intr_delay_resolution)) { netdev_err(ena_dev->net_device, "Illegal intr_delay_resolution provided. 
Going to use default 1 usec resolution\n"); intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; } /* update Rx */ ena_dev->intr_moder_rx_interval = ena_dev->intr_moder_rx_interval * prev_intr_delay_resolution / intr_delay_resolution; /* update Tx */ ena_dev->intr_moder_tx_interval = ena_dev->intr_moder_tx_interval * prev_intr_delay_resolution / intr_delay_resolution; ena_dev->intr_delay_resolution = intr_delay_resolution; } /*****************************************************************************/ /******************************* API ******************************/ /*****************************************************************************/ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, struct ena_admin_aq_entry *cmd, size_t cmd_size, struct ena_admin_acq_entry *comp, size_t comp_size) { struct ena_comp_ctx *comp_ctx; int ret; comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, comp, comp_size); if (IS_ERR(comp_ctx)) { ret = PTR_ERR(comp_ctx); if (ret == -ENODEV) netdev_dbg(admin_queue->ena_dev->net_device, "Failed to submit command [%d]\n", ret); else netdev_err(admin_queue->ena_dev->net_device, "Failed to submit command [%d]\n", ret); return ret; } ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); if (unlikely(ret)) { if (admin_queue->running_state) netdev_err(admin_queue->ena_dev->net_device, "Failed to process command. ret = %d\n", ret); else netdev_dbg(admin_queue->ena_dev->net_device, "Failed to process command. ret = %d\n", ret); } return ret; } int ena_com_create_io_cq(struct ena_com_dev *ena_dev, struct ena_com_io_cq *io_cq) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_admin_aq_create_cq_cmd create_cmd; struct ena_admin_acq_create_cq_resp_desc cmd_completion; int ret; memset(&create_cmd, 0x0, sizeof(create_cmd)); create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; create_cmd.cq_caps_1 |= ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; create_cmd.msix_vector = io_cq->msix_vector; create_cmd.cq_depth = io_cq->q_depth; ret = ena_com_mem_addr_set(ena_dev, &create_cmd.cq_ba, io_cq->cdesc_addr.phys_addr); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&create_cmd, sizeof(create_cmd), (struct ena_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Failed to create IO CQ. 
error: %d\n", ret); return ret; } io_cq->idx = cmd_completion.cq_idx; io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + cmd_completion.cq_interrupt_unmask_register_offset); if (cmd_completion.cq_head_db_register_offset) io_cq->cq_head_db_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + cmd_completion.cq_head_db_register_offset); if (cmd_completion.numa_node_register_offset) io_cq->numa_node_cfg_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + cmd_completion.numa_node_register_offset); netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); return ret; } int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, struct ena_com_io_sq **io_sq, struct ena_com_io_cq **io_cq) { if (qid >= ENA_TOTAL_NUM_QUEUES) { netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid, ENA_TOTAL_NUM_QUEUES); return -EINVAL; } *io_sq = &ena_dev->io_sq_queues[qid]; *io_cq = &ena_dev->io_cq_queues[qid]; return 0; } void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_comp_ctx *comp_ctx; u16 i; if (!admin_queue->comp_ctx) return; for (i = 0; i < admin_queue->q_depth; i++) { comp_ctx = get_comp_ctxt(admin_queue, i, false); if (unlikely(!comp_ctx)) break; comp_ctx->status = ENA_CMD_ABORTED; complete(&comp_ctx->wait_event); } } void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; unsigned long flags = 0; u32 exp = 0; spin_lock_irqsave(&admin_queue->q_lock, flags); while (atomic_read(&admin_queue->outstanding_cmds) != 0) { spin_unlock_irqrestore(&admin_queue->q_lock, flags); ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); spin_lock_irqsave(&admin_queue->q_lock, flags); } spin_unlock_irqrestore(&admin_queue->q_lock, flags); } int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, struct ena_com_io_cq *io_cq) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_admin_aq_destroy_cq_cmd destroy_cmd; struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; int ret; memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); destroy_cmd.cq_idx = io_cq->idx; destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&destroy_cmd, sizeof(destroy_cmd), (struct ena_admin_acq_entry *)&destroy_resp, sizeof(destroy_resp)); if (unlikely(ret && (ret != -ENODEV))) netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. 
error: %d\n", ret); return ret; } bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) { return ena_dev->admin_queue.running_state; } void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; unsigned long flags = 0; spin_lock_irqsave(&admin_queue->q_lock, flags); ena_dev->admin_queue.running_state = state; spin_unlock_irqrestore(&admin_queue->q_lock, flags); } void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) { u16 depth = ena_dev->aenq.q_depth; WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); /* Init head_db to mark that all entries in the queue * are initially available */ writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); } int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) { struct ena_com_admin_queue *admin_queue; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; struct ena_admin_get_feat_resp get_resp; int ret; ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0); if (ret) { dev_info(ena_dev->dmadev, "Can't get aenq configuration\n"); return ret; } if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { netdev_warn(ena_dev->net_device, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", get_resp.u.aenq.supported_groups, groups_flag); return -EOPNOTSUPP; } memset(&cmd, 0x0, sizeof(cmd)); admin_queue = &ena_dev->admin_queue; cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; cmd.aq_common_descriptor.flags = 0; cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; cmd.u.aenq.enabled_groups = groups_flag; ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&cmd, sizeof(cmd), (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret); return ret; } int ena_com_get_dma_width(struct ena_com_dev *ena_dev) { u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); u32 width; if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width); if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width); return -EINVAL; } ena_dev->dma_addr_bits = width; return width; } int ena_com_validate_version(struct ena_com_dev *ena_dev) { u32 ver; u32 ctrl_ver; u32 ctrl_ver_masked; /* Make sure the ENA version and the controller version are at least * as the driver expects */ ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); ctrl_ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CONTROLLER_VERSION_OFF); if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n", (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n", (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, 
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); ctrl_ver_masked = (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); /* Validate the ctrl version without the implementation ID */ if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { netdev_err(ena_dev->net_device, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); return -1; } return 0; } static void ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev, struct ena_com_admin_queue *admin_queue) { if (!admin_queue->comp_ctx) return; devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx); admin_queue->comp_ctx = NULL; } void ena_com_admin_destroy(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_com_admin_cq *cq = &admin_queue->cq; struct ena_com_admin_sq *sq = &admin_queue->sq; struct ena_com_aenq *aenq = &ena_dev->aenq; u16 size; ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue); size = ADMIN_SQ_SIZE(admin_queue->q_depth); if (sq->entries) dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr); sq->entries = NULL; size = ADMIN_CQ_SIZE(admin_queue->q_depth); if (cq->entries) dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr); cq->entries = NULL; size = ADMIN_AENQ_SIZE(aenq->q_depth); if (ena_dev->aenq.entries) dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr); aenq->entries = NULL; } void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) { u32 mask_value = 0; if (polling) mask_value = ENA_REGS_ADMIN_INTR_MASK; writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); ena_dev->admin_queue.polling = polling; } void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, bool polling) { ena_dev->admin_queue.auto_polling = polling; } int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) { struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; spin_lock_init(&mmio_read->lock); mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), &mmio_read->read_resp_dma_addr, GFP_KERNEL); if (unlikely(!mmio_read->read_resp)) goto err; ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); mmio_read->read_resp->req_id = 0x0; mmio_read->seq_num = 0x0; mmio_read->readless_supported = true; return 0; err: return -ENOMEM; } void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) { struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; mmio_read->readless_supported = readless_supported; } void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) { struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp, mmio_read->read_resp_dma_addr); mmio_read->read_resp = NULL; } void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) { struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; u32 addr_low, addr_high; addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 
writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); } int ena_com_admin_init(struct ena_com_dev *ena_dev, struct ena_aenq_handlers *aenq_handlers) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; int ret; dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n"); return -ENODEV; } admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; admin_queue->q_dmadev = ena_dev->dmadev; admin_queue->polling = false; admin_queue->curr_cmd_id = 0; atomic_set(&admin_queue->outstanding_cmds, 0); spin_lock_init(&admin_queue->q_lock); ret = ena_com_init_comp_ctxt(admin_queue); if (ret) goto error; ret = ena_com_admin_init_sq(admin_queue); if (ret) goto error; ret = ena_com_admin_init_cq(admin_queue); if (ret) goto error; admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF); addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); aq_caps = 0; aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; aq_caps |= (sizeof(struct ena_admin_aq_entry) << ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; acq_caps = 0; acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; acq_caps |= (sizeof(struct ena_admin_acq_entry) << ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); if (ret) goto error; admin_queue->ena_dev = ena_dev; admin_queue->running_state = true; return 0; error: ena_com_admin_destroy(ena_dev); return ret; } int ena_com_create_io_queue(struct ena_com_dev *ena_dev, struct ena_com_create_io_ctx *ctx) { struct ena_com_io_sq *io_sq; struct ena_com_io_cq *io_cq; int ret; if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n", ctx->qid, ENA_TOTAL_NUM_QUEUES); return -EINVAL; } io_sq = &ena_dev->io_sq_queues[ctx->qid]; io_cq = &ena_dev->io_cq_queues[ctx->qid]; memset(io_sq, 0x0, sizeof(*io_sq)); memset(io_cq, 0x0, sizeof(*io_cq)); /* Init CQ */ io_cq->q_depth = ctx->queue_size; io_cq->direction = ctx->direction; io_cq->qid = ctx->qid; io_cq->msix_vector = ctx->msix_vector; io_sq->q_depth = ctx->queue_size; io_sq->direction = ctx->direction; io_sq->qid = ctx->qid; io_sq->mem_queue_type = ctx->mem_queue_type; if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) /* header length is limited to 8 bits */ io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256); ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); if (ret) goto error; ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); if (ret) goto error; ret = ena_com_create_io_cq(ena_dev, io_cq); if (ret) goto error; ret = 
ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); if (ret) goto destroy_io_cq; return 0; destroy_io_cq: ena_com_destroy_io_cq(ena_dev, io_cq); error: ena_com_io_queue_free(ena_dev, io_sq, io_cq); return ret; } void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) { struct ena_com_io_sq *io_sq; struct ena_com_io_cq *io_cq; if (qid >= ENA_TOTAL_NUM_QUEUES) { netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n", qid, ENA_TOTAL_NUM_QUEUES); return; } io_sq = &ena_dev->io_sq_queues[qid]; io_cq = &ena_dev->io_cq_queues[qid]; ena_com_destroy_io_sq(ena_dev, io_sq); ena_com_destroy_io_cq(ena_dev, io_cq); ena_com_io_queue_free(ena_dev, io_sq, io_cq); } int ena_com_get_link_params(struct ena_com_dev *ena_dev, struct ena_admin_get_feat_resp *resp) { return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0); } int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, struct ena_com_dev_get_features_ctx *get_feat_ctx) { struct ena_admin_get_feat_resp get_resp; int rc; rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_DEVICE_ATTRIBUTES, 0); if (rc) return rc; memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, sizeof(get_resp.u.dev_attr)); ena_dev->supported_features = get_resp.u.dev_attr.supported_features; ena_dev->capabilities = get_resp.u.dev_attr.capabilities; if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_MAX_QUEUES_EXT, ENA_FEATURE_MAX_QUEUE_EXT_VER); if (rc) return rc; if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER) return -EINVAL; memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext, sizeof(get_resp.u.max_queue_ext)); ena_dev->tx_max_header_size = get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size; } else { rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_MAX_QUEUES_NUM, 0); memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, sizeof(get_resp.u.max_queue)); ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size; if (rc) return rc; } rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0); if (rc) return rc; memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, sizeof(get_resp.u.aenq)); rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); if (rc) return rc; memcpy(&get_feat_ctx->offload, &get_resp.u.offload, sizeof(get_resp.u.offload)); /* Driver hints isn't mandatory admin command. 
So in case the * command isn't supported set driver hints to 0 */ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0); if (!rc) memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints)); else if (rc == -EOPNOTSUPP) memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints)); else return rc; rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0); if (!rc) memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq)); else if (rc == -EOPNOTSUPP) memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); else return rc; return 0; } void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) { ena_com_handle_admin_completion(&ena_dev->admin_queue); } /* ena_handle_specific_aenq_event: * return the handler that is relevant to the specific event group */ static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev, u16 group) { struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers; if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) return aenq_handlers->handlers[group]; return aenq_handlers->unimplemented_handler; } /* ena_aenq_intr_handler: * handles the aenq incoming events. * pop events from the queue and apply the specific handler */ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) { struct ena_admin_aenq_entry *aenq_e; struct ena_admin_aenq_common_desc *aenq_common; struct ena_com_aenq *aenq = &ena_dev->aenq; u64 timestamp; ena_aenq_handler handler_cb; u16 masked_head, processed = 0; u8 phase; masked_head = aenq->head & (aenq->q_depth - 1); phase = aenq->phase; aenq_e = &aenq->entries[masked_head]; /* Get first entry */ aenq_common = &aenq_e->aenq_common_desc; /* Go over all the events */ while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { /* Make sure the phase bit (ownership) is as expected before * reading the rest of the descriptor. */ dma_rmb(); timestamp = (u64)aenq_common->timestamp_low | ((u64)aenq_common->timestamp_high << 32); netdev_dbg(ena_dev->net_device, "AENQ! 
Group[%x] Syndrome[%x] timestamp: [%llus]\n", aenq_common->group, aenq_common->syndrome, timestamp); /* Handle specific event*/ handler_cb = ena_com_get_specific_aenq_cb(ena_dev, aenq_common->group); handler_cb(data, aenq_e); /* call the actual event handler*/ /* Get next event entry */ masked_head++; processed++; if (unlikely(masked_head == aenq->q_depth)) { masked_head = 0; phase = !phase; } aenq_e = &aenq->entries[masked_head]; aenq_common = &aenq_e->aenq_common_desc; } aenq->head += processed; aenq->phase = phase; /* Don't update aenq doorbell if there weren't any processed events */ if (!processed) return; /* write the aenq doorbell after all AENQ descriptors were read */ mb(); writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); } int ena_com_dev_reset(struct ena_com_dev *ena_dev, enum ena_regs_reset_reason_types reset_reason) { u32 stat, timeout, cap, reset_val; int rc; stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) { netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n"); return -ETIME; } if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n"); return -EINVAL; } timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; if (timeout == 0) { netdev_err(ena_dev->net_device, "Invalid timeout value\n"); return -EINVAL; } /* start reset */ reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) & ENA_REGS_DEV_CTL_RESET_REASON_MASK; writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); /* Write again the MMIO read request address */ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); rc = wait_for_reset_state(ena_dev, timeout, ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); if (rc != 0) { netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n"); return rc; } /* reset done */ writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); rc = wait_for_reset_state(ena_dev, timeout, 0); if (rc != 0) { netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n"); return rc; } timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; if (timeout) /* the resolution of timeout reg is 100ms */ ena_dev->admin_queue.completion_timeout = timeout * 100000; else ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; return 0; } static int ena_get_dev_stats(struct ena_com_dev *ena_dev, struct ena_com_stats_ctx *ctx, enum ena_admin_get_stats_type type) { struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; struct ena_com_admin_queue *admin_queue; int ret; admin_queue = &ena_dev->admin_queue; get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; get_cmd->aq_common_descriptor.flags = 0; get_cmd->type = type; ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)get_cmd, sizeof(*get_cmd), (struct ena_admin_acq_entry *)get_resp, sizeof(*get_resp)); if (unlikely(ret)) netdev_err(ena_dev->net_device, "Failed to get stats. 
error: %d\n", ret); return ret; } int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, struct ena_admin_eni_stats *stats) { struct ena_com_stats_ctx ctx; int ret; if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) { netdev_err(ena_dev->net_device, "Capability %d isn't supported\n", ENA_ADMIN_ENI_STATS); return -EOPNOTSUPP; } memset(&ctx, 0x0, sizeof(ctx)); ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI); if (likely(ret == 0)) memcpy(stats, &ctx.get_resp.u.eni_stats, sizeof(ctx.get_resp.u.eni_stats)); return ret; } int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, struct ena_admin_basic_stats *stats) { struct ena_com_stats_ctx ctx; int ret; memset(&ctx, 0x0, sizeof(ctx)); ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); if (likely(ret == 0)) memcpy(stats, &ctx.get_resp.u.basic_stats, sizeof(ctx.get_resp.u.basic_stats)); return ret; } int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu) { struct ena_com_admin_queue *admin_queue; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU); return -EOPNOTSUPP; } memset(&cmd, 0x0, sizeof(cmd)); admin_queue = &ena_dev->admin_queue; cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; cmd.aq_common_descriptor.flags = 0; cmd.feat_common.feature_id = ENA_ADMIN_MTU; cmd.u.mtu.mtu = mtu; ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&cmd, sizeof(cmd), (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret); return ret; } int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, struct ena_admin_feature_offload_desc *offload) { int ret; struct ena_admin_get_feat_resp resp; ret = ena_com_get_feature(ena_dev, &resp, ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret); return ret; } memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); return 0; } int ena_com_set_hash_function(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_rss *rss = &ena_dev->rss; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; struct ena_admin_get_feat_resp get_resp; int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) { netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_FUNCTION); return -EOPNOTSUPP; } /* Validate hash function is supported */ ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_RSS_HASH_FUNCTION, 0); if (unlikely(ret)) return ret; if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n", rss->hash_func); return -EOPNOTSUPP; } memset(&cmd, 0x0, sizeof(cmd)); cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; cmd.aq_common_descriptor.flags = ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; cmd.u.flow_hash_func.init_val = rss->hash_init_val; cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; ret = ena_com_mem_addr_set(ena_dev, &cmd.control_buffer.address, rss->hash_key_dma_addr); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } 
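	/* The key itself is not carried inline in the command; the control
	 * buffer points at the pre-allocated hash_key DMA buffer and only
	 * its length is filled in below.
	 */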
cmd.control_buffer.length = sizeof(*rss->hash_key); ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&cmd, sizeof(cmd), (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n", rss->hash_func, ret); return -EINVAL; } return 0; } int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, enum ena_admin_hash_functions func, const u8 *key, u16 key_len, u32 init_val) { struct ena_admin_feature_rss_flow_hash_control *hash_key; struct ena_admin_get_feat_resp get_resp; enum ena_admin_hash_functions old_func; struct ena_rss *rss = &ena_dev->rss; int rc; hash_key = rss->hash_key; /* Make sure size is a mult of DWs */ if (unlikely(key_len & 0x3)) return -EINVAL; rc = ena_com_get_feature_ex(ena_dev, &get_resp, ENA_ADMIN_RSS_HASH_FUNCTION, rss->hash_key_dma_addr, sizeof(*rss->hash_key), 0); if (unlikely(rc)) return rc; if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func); return -EOPNOTSUPP; } if ((func == ENA_ADMIN_TOEPLITZ) && key) { if (key_len != sizeof(hash_key->key)) { netdev_err(ena_dev->net_device, "key len (%u) doesn't equal the supported size (%zu)\n", key_len, sizeof(hash_key->key)); return -EINVAL; } memcpy(hash_key->key, key, key_len); hash_key->key_parts = key_len / sizeof(hash_key->key[0]); } rss->hash_init_val = init_val; old_func = rss->hash_func; rss->hash_func = func; rc = ena_com_set_hash_function(ena_dev); /* Restore the old function */ if (unlikely(rc)) rss->hash_func = old_func; return rc; } int ena_com_get_hash_function(struct ena_com_dev *ena_dev, enum ena_admin_hash_functions *func) { struct ena_rss *rss = &ena_dev->rss; struct ena_admin_get_feat_resp get_resp; int rc; if (unlikely(!func)) return -EINVAL; rc = ena_com_get_feature_ex(ena_dev, &get_resp, ENA_ADMIN_RSS_HASH_FUNCTION, rss->hash_key_dma_addr, sizeof(*rss->hash_key), 0); if (unlikely(rc)) return rc; /* ffs() returns 1 in case the lsb is set */ rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func); if (rss->hash_func) rss->hash_func--; *func = rss->hash_func; return 0; } int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key) { struct ena_admin_feature_rss_flow_hash_control *hash_key = ena_dev->rss.hash_key; if (key) memcpy(key, hash_key->key, (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0])); return 0; } int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, enum ena_admin_flow_hash_proto proto, u16 *fields) { struct ena_rss *rss = &ena_dev->rss; struct ena_admin_get_feat_resp get_resp; int rc; rc = ena_com_get_feature_ex(ena_dev, &get_resp, ENA_ADMIN_RSS_HASH_INPUT, rss->hash_ctrl_dma_addr, sizeof(*rss->hash_ctrl), 0); if (unlikely(rc)) return rc; if (fields) *fields = rss->hash_ctrl->selected_fields[proto].fields; return 0; } int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_rss *rss = &ena_dev->rss; struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) { netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT); return -EOPNOTSUPP; } memset(&cmd, 0x0, sizeof(cmd)); cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; cmd.aq_common_descriptor.flags = 
ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT; cmd.u.flow_hash_input.enabled_input_sort = ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK | ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; ret = ena_com_mem_addr_set(ena_dev, &cmd.control_buffer.address, rss->hash_ctrl_dma_addr); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } cmd.control_buffer.length = sizeof(*hash_ctrl); ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&cmd, sizeof(cmd), (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret); return ret; } int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; u16 available_fields = 0; int rc, i; /* Get the supported hash input */ rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL); if (unlikely(rc)) return rc; hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { available_fields = hash_ctrl->selected_fields[i].fields & hash_ctrl->supported_fields[i].fields; if (available_fields != hash_ctrl->selected_fields[i].fields) { netdev_err(ena_dev->net_device, "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", i, hash_ctrl->supported_fields[i].fields, hash_ctrl->selected_fields[i].fields); return -EOPNOTSUPP; } } rc = ena_com_set_hash_ctrl(ena_dev); /* In case of failure, restore the old hash ctrl */ if (unlikely(rc)) ena_com_get_hash_ctrl(ena_dev, 0, NULL); return rc; } int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, enum ena_admin_flow_hash_proto proto, u16 hash_fields) { struct ena_rss *rss = &ena_dev->rss; struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; u16 supported_fields; int rc; if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto); return -EINVAL; } /* Get the ctrl table */ rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); if (unlikely(rc)) return rc; /* Make sure all the fields are supported */ supported_fields = hash_ctrl->supported_fields[proto].fields; if ((hash_fields & supported_fields) != hash_fields) { netdev_err(ena_dev->net_device, "Proto %d doesn't support the required fields %x. 
supports only: %x\n", proto, hash_fields, supported_fields); } hash_ctrl->selected_fields[proto].fields = hash_fields; rc = ena_com_set_hash_ctrl(ena_dev); /* In case of failure, restore the old hash ctrl */ if (unlikely(rc)) ena_com_get_hash_ctrl(ena_dev, 0, NULL); return 0; } int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, u16 entry_idx, u16 entry_value) { struct ena_rss *rss = &ena_dev->rss; if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) return -EINVAL; if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES))) return -EINVAL; rss->host_rss_ind_tbl[entry_idx] = entry_value; return 0; } int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; struct ena_rss *rss = &ena_dev->rss; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; int ret; if (!ena_com_check_supported_feature_id( ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) { netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); return -EOPNOTSUPP; } ret = ena_com_ind_tbl_convert_to_device(ena_dev); if (ret) { netdev_err(ena_dev->net_device, "Failed to convert host indirection table to device table\n"); return ret; } memset(&cmd, 0x0, sizeof(cmd)); cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; cmd.aq_common_descriptor.flags = ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG; cmd.u.ind_table.size = rss->tbl_log_size; cmd.u.ind_table.inline_index = 0xFFFFFFFF; ret = ena_com_mem_addr_set(ena_dev, &cmd.control_buffer.address, rss->rss_ind_tbl_dma_addr); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * sizeof(struct ena_admin_rss_ind_table_entry); ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&cmd, sizeof(cmd), (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret); return ret; } int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) { struct ena_rss *rss = &ena_dev->rss; struct ena_admin_get_feat_resp get_resp; u32 tbl_size; int i, rc; tbl_size = (1ULL << rss->tbl_log_size) * sizeof(struct ena_admin_rss_ind_table_entry); rc = ena_com_get_feature_ex(ena_dev, &get_resp, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, rss->rss_ind_tbl_dma_addr, tbl_size, 0); if (unlikely(rc)) return rc; if (!ind_tbl) return 0; for (i = 0; i < (1 << rss->tbl_log_size); i++) ind_tbl[i] = rss->host_rss_ind_tbl[i]; return 0; } int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) { int rc; memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); if (unlikely(rc)) goto err_indr_tbl; /* The following function might return unsupported in case the * device doesn't support setting the key / hash function. We can safely * ignore this error and have indirection table support only. 
*/ rc = ena_com_hash_key_allocate(ena_dev); if (likely(!rc)) ena_com_hash_key_fill_default_key(ena_dev); else if (rc != -EOPNOTSUPP) goto err_hash_key; rc = ena_com_hash_ctrl_init(ena_dev); if (unlikely(rc)) goto err_hash_ctrl; return 0; err_hash_ctrl: ena_com_hash_key_destroy(ena_dev); err_hash_key: ena_com_indirect_table_destroy(ena_dev); err_indr_tbl: return rc; } void ena_com_rss_destroy(struct ena_com_dev *ena_dev) { ena_com_indirect_table_destroy(ena_dev); ena_com_hash_key_destroy(ena_dev); ena_com_hash_ctrl_destroy(ena_dev); memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); } int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K, &host_attr->host_info_dma_addr, GFP_KERNEL); if (unlikely(!host_attr->host_info)) return -ENOMEM; host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | (ENA_COMMON_SPEC_VERSION_MINOR)); return 0; } int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, u32 debug_area_size) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; host_attr->debug_area_virt_addr = dma_alloc_coherent(ena_dev->dmadev, debug_area_size, &host_attr->debug_area_dma_addr, GFP_KERNEL); if (unlikely(!host_attr->debug_area_virt_addr)) { host_attr->debug_area_size = 0; return -ENOMEM; } host_attr->debug_area_size = debug_area_size; return 0; } void ena_com_delete_host_info(struct ena_com_dev *ena_dev) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; if (host_attr->host_info) { dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info, host_attr->host_info_dma_addr); host_attr->host_info = NULL; } } void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; if (host_attr->debug_area_virt_addr) { dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size, host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr); host_attr->debug_area_virt_addr = NULL; } } int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; struct ena_com_admin_queue *admin_queue; struct ena_admin_set_feat_cmd cmd; struct ena_admin_set_feat_resp resp; int ret; /* Host attribute config is called before ena_com_get_dev_attr_feat * so ena_com can't check if the feature is supported. 
*/ memset(&cmd, 0x0, sizeof(cmd)); admin_queue = &ena_dev->admin_queue; cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; ret = ena_com_mem_addr_set(ena_dev, &cmd.u.host_attr.debug_ba, host_attr->debug_area_dma_addr); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } ret = ena_com_mem_addr_set(ena_dev, &cmd.u.host_attr.os_info_ba, host_attr->host_info_dma_addr); if (unlikely(ret)) { netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&cmd, sizeof(cmd), (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret); return ret; } /* Interrupt moderation */ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) { return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_INTERRUPT_MODERATION); } static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev, u32 coalesce_usecs, u32 intr_delay_resolution, u32 *intr_moder_interval) { if (!intr_delay_resolution) { netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n"); return -EFAULT; } *intr_moder_interval = coalesce_usecs / intr_delay_resolution; return 0; } int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, u32 tx_coalesce_usecs) { return ena_com_update_nonadaptive_moderation_interval(ena_dev, tx_coalesce_usecs, ena_dev->intr_delay_resolution, &ena_dev->intr_moder_tx_interval); } int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, u32 rx_coalesce_usecs) { return ena_com_update_nonadaptive_moderation_interval(ena_dev, rx_coalesce_usecs, ena_dev->intr_delay_resolution, &ena_dev->intr_moder_rx_interval); } int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) { struct ena_admin_get_feat_resp get_resp; u16 delay_resolution; int rc; rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_INTERRUPT_MODERATION, 0); if (rc) { if (rc == -EOPNOTSUPP) { netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_INTERRUPT_MODERATION); rc = 0; } else { netdev_err(ena_dev->net_device, "Failed to get interrupt moderation admin cmd. 
rc: %d\n", rc); } /* no moderation supported, disable adaptive support */ ena_com_disable_adaptive_moderation(ena_dev); return rc; } /* if moderation is supported by device we set adaptive moderation */ delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); /* Disable adaptive moderation by default - can be enabled later */ ena_com_disable_adaptive_moderation(ena_dev); return 0; } unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) { return ena_dev->intr_moder_tx_interval; } unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) { return ena_dev->intr_moder_rx_interval; } int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq_features, struct ena_llq_configurations *llq_default_cfg) { struct ena_com_llq_info *llq_info = &ena_dev->llq_info; int rc; if (!llq_features->max_llq_num) { ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return 0; } rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); if (rc) return rc; ena_dev->tx_max_header_size = llq_info->desc_list_entry_size - (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); if (unlikely(ena_dev->tx_max_header_size == 0)) { netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n"); return -EINVAL; } ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; return 0; }
linux-master
drivers/net/ethernet/amazon/ena/ena_com.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif /* CONFIG_RFS_ACCEL */ #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/numa.h> #include <linux/pci.h> #include <linux/utsname.h> #include <linux/version.h> #include <linux/vmalloc.h> #include <net/ip.h> #include "ena_netdev.h" #include <linux/bpf_trace.h> #include "ena_pci_id_tbl.h" MODULE_AUTHOR("Amazon.com, Inc. or its affiliates"); MODULE_DESCRIPTION(DEVICE_NAME); MODULE_LICENSE("GPL"); /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (5 * HZ) #define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus()) #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) static struct ena_aenq_handlers aenq_handlers; static struct workqueue_struct *ena_wq; MODULE_DEVICE_TABLE(pci, ena_pci_tbl); static int ena_rss_init_default(struct ena_adapter *adapter); static void check_for_admin_com_state(struct ena_adapter *adapter); static void ena_destroy_device(struct ena_adapter *adapter, bool graceful); static int ena_restore_device(struct ena_adapter *adapter); static void ena_init_io_rings(struct ena_adapter *adapter, int first_index, int count); static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index, int count); static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index, int count); static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid); static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter, int first_index, int count); static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid); static void ena_free_tx_resources(struct ena_adapter *adapter, int qid); static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget); static void ena_destroy_all_tx_queues(struct ena_adapter *adapter); static void ena_free_all_io_tx_resources(struct ena_adapter *adapter); static void ena_napi_disable_in_range(struct ena_adapter *adapter, int first_index, int count); static void ena_napi_enable_in_range(struct ena_adapter *adapter, int first_index, int count); static int ena_up(struct ena_adapter *adapter); static void ena_down(struct ena_adapter *adapter); static void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring); static void ena_update_ring_numa_node(struct ena_ring *tx_ring, struct ena_ring *rx_ring); static void ena_unmap_tx_buff(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info); static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, int first_index, int count); /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */ static void ena_increase_stat(u64 *statp, u64 cnt, struct u64_stats_sync *syncp) { u64_stats_update_begin(syncp); (*statp) += cnt; u64_stats_update_end(syncp); } static void ena_ring_tx_doorbell(struct ena_ring *tx_ring) { ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp); } static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct ena_adapter *adapter = netdev_priv(dev); /* Change the state of the device to trigger reset * Check that we are not in the middle or a trigger already */ if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, 
&adapter->flags)) return; ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD); ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp); netif_err(adapter, tx_err, dev, "Transmit time out\n"); } static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu) { int i; for (i = 0; i < adapter->num_io_queues; i++) adapter->rx_ring[i].mtu = mtu; } static int ena_change_mtu(struct net_device *dev, int new_mtu) { struct ena_adapter *adapter = netdev_priv(dev); int ret; ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); if (!ret) { netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu); update_rx_ring_mtu(adapter, new_mtu); dev->mtu = new_mtu; } else { netif_err(adapter, drv, dev, "Failed to set MTU to %d\n", new_mtu); } return ret; } static int ena_xmit_common(struct net_device *dev, struct ena_ring *ring, struct ena_tx_buffer *tx_info, struct ena_com_tx_ctx *ena_tx_ctx, u16 next_to_use, u32 bytes) { struct ena_adapter *adapter = netdev_priv(dev); int rc, nb_hw_desc; if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq, ena_tx_ctx))) { netif_dbg(adapter, tx_queued, dev, "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", ring->qid); ena_ring_tx_doorbell(ring); } /* prepare the packet's descriptors to dma engine */ rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx, &nb_hw_desc); /* In case there isn't enough space in the queue for the packet, * we simply drop it. All other failure reasons of * ena_com_prepare_tx() are fatal and therefore require a device reset. */ if (unlikely(rc)) { netif_err(adapter, tx_queued, dev, "Failed to prepare tx bufs\n"); ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp); if (rc != -ENOMEM) ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE); return rc; } u64_stats_update_begin(&ring->syncp); ring->tx_stats.cnt++; ring->tx_stats.bytes += bytes; u64_stats_update_end(&ring->syncp); tx_info->tx_descs = nb_hw_desc; tx_info->last_jiffies = jiffies; tx_info->print_once = 0; ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring->ring_size); return 0; } /* This is the XDP napi callback. XDP queues use a separate napi callback * than Rx/Tx queues. 
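 * It only reaps Tx completions on the XDP ring via ena_clean_xdp_irq(); when
 * less than the budget is consumed it completes NAPI and re-arms the
 * interrupt through ena_unmask_interrupt().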
*/ static int ena_xdp_io_poll(struct napi_struct *napi, int budget) { struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); u32 xdp_work_done, xdp_budget; struct ena_ring *xdp_ring; int napi_comp_call = 0; int ret; xdp_ring = ena_napi->xdp_ring; xdp_budget = budget; if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) || test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) { napi_complete_done(napi, 0); return 0; } xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget); /* If the device is about to reset or down, avoid unmask * the interrupt and return 0 so NAPI won't reschedule */ if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) { napi_complete_done(napi, 0); ret = 0; } else if (xdp_budget > xdp_work_done) { napi_comp_call = 1; if (napi_complete_done(napi, xdp_work_done)) ena_unmask_interrupt(xdp_ring, NULL); ena_update_ring_numa_node(xdp_ring, NULL); ret = xdp_work_done; } else { ret = xdp_budget; } u64_stats_update_begin(&xdp_ring->syncp); xdp_ring->tx_stats.napi_comp += napi_comp_call; xdp_ring->tx_stats.tx_poll++; u64_stats_update_end(&xdp_ring->syncp); xdp_ring->tx_stats.last_napi_jiffies = jiffies; return ret; } static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring, struct ena_tx_buffer *tx_info, struct xdp_frame *xdpf, struct ena_com_tx_ctx *ena_tx_ctx) { struct ena_adapter *adapter = xdp_ring->adapter; struct ena_com_buf *ena_buf; int push_len = 0; dma_addr_t dma; void *data; u32 size; tx_info->xdpf = xdpf; data = tx_info->xdpf->data; size = tx_info->xdpf->len; if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { /* Designate part of the packet for LLQ */ push_len = min_t(u32, size, xdp_ring->tx_max_header_size); ena_tx_ctx->push_header = data; size -= push_len; data += push_len; } ena_tx_ctx->header_len = push_len; if (size > 0) { dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(xdp_ring->dev, dma))) goto error_report_dma_error; tx_info->map_linear_data = 0; ena_buf = tx_info->bufs; ena_buf->paddr = dma; ena_buf->len = size; ena_tx_ctx->ena_bufs = ena_buf; ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1; } return 0; error_report_dma_error: ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1, &xdp_ring->syncp); netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n"); return -EINVAL; } static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring, struct net_device *dev, struct xdp_frame *xdpf, int flags) { struct ena_com_tx_ctx ena_tx_ctx = {}; struct ena_tx_buffer *tx_info; u16 next_to_use, req_id; int rc; next_to_use = xdp_ring->next_to_use; req_id = xdp_ring->free_ids[next_to_use]; tx_info = &xdp_ring->tx_buffer_info[req_id]; tx_info->num_of_bufs = 0; rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx); if (unlikely(rc)) return rc; ena_tx_ctx.req_id = req_id; rc = ena_xmit_common(dev, xdp_ring, tx_info, &ena_tx_ctx, next_to_use, xdpf->len); if (rc) goto error_unmap_dma; /* trigger the dma engine. ena_ring_tx_doorbell() * calls a memory barrier inside it. 
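 * The doorbell is only written when XDP_XMIT_FLUSH is set, so a caller that
 * queues several frames in a row (see ena_xdp_xmit() below) pays for a
 * single doorbell per batch.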
*/ if (flags & XDP_XMIT_FLUSH) ena_ring_tx_doorbell(xdp_ring); return rc; error_unmap_dma: ena_unmap_tx_buff(xdp_ring, tx_info); tx_info->xdpf = NULL; return rc; } static int ena_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct ena_adapter *adapter = netdev_priv(dev); struct ena_ring *xdp_ring; int qid, i, nxmit = 0; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) return -ENETDOWN; /* We assume that all rings have the same XDP program */ if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog)) return -ENXIO; qid = smp_processor_id() % adapter->xdp_num_queues; qid += adapter->xdp_first_ring; xdp_ring = &adapter->tx_ring[qid]; /* Other CPU ids might try to send thorugh this queue */ spin_lock(&xdp_ring->xdp_tx_lock); for (i = 0; i < n; i++) { if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0)) break; nxmit++; } /* Ring doorbell to make device aware of the packets */ if (flags & XDP_XMIT_FLUSH) ena_ring_tx_doorbell(xdp_ring); spin_unlock(&xdp_ring->xdp_tx_lock); /* Return number of packets sent */ return nxmit; } static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) { u32 verdict = ENA_XDP_PASS; struct bpf_prog *xdp_prog; struct ena_ring *xdp_ring; struct xdp_frame *xdpf; u64 *xdp_stat; xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); if (!xdp_prog) goto out; verdict = bpf_prog_run_xdp(xdp_prog, xdp); switch (verdict) { case XDP_TX: xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) { trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; verdict = ENA_XDP_DROP; break; } /* Find xmit queue */ xdp_ring = rx_ring->xdp_ring; /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */ spin_lock(&xdp_ring->xdp_tx_lock); if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH)) xdp_return_frame(xdpf); spin_unlock(&xdp_ring->xdp_tx_lock); xdp_stat = &rx_ring->rx_stats.xdp_tx; verdict = ENA_XDP_TX; break; case XDP_REDIRECT: if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) { xdp_stat = &rx_ring->rx_stats.xdp_redirect; verdict = ENA_XDP_REDIRECT; break; } trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; verdict = ENA_XDP_DROP; break; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; verdict = ENA_XDP_DROP; break; case XDP_DROP: xdp_stat = &rx_ring->rx_stats.xdp_drop; verdict = ENA_XDP_DROP; break; case XDP_PASS: xdp_stat = &rx_ring->rx_stats.xdp_pass; verdict = ENA_XDP_PASS; break; default: bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_invalid; verdict = ENA_XDP_DROP; } ena_increase_stat(xdp_stat, 1, &rx_ring->syncp); out: return verdict; } static void ena_init_all_xdp_queues(struct ena_adapter *adapter) { adapter->xdp_first_ring = adapter->num_io_queues; adapter->xdp_num_queues = adapter->num_io_queues; ena_init_io_rings(adapter, adapter->xdp_first_ring, adapter->xdp_num_queues); } static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter) { int rc = 0; rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring, adapter->xdp_num_queues); if (rc) goto setup_err; rc = ena_create_io_tx_queues_in_range(adapter, adapter->xdp_first_ring, adapter->xdp_num_queues); if (rc) goto create_err; return 0; create_err: ena_free_all_io_tx_resources(adapter); setup_err: return rc; } /* Provides a way for both kernel and bpf-prog to 
know * more about the RX-queue a given XDP frame arrived on. */ static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring) { int rc; rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0); if (rc) { netif_err(rx_ring->adapter, ifup, rx_ring->netdev, "Failed to register xdp rx queue info. RX queue num %d rc: %d\n", rx_ring->qid, rc); goto err; } rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (rc) { netif_err(rx_ring->adapter, ifup, rx_ring->netdev, "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n", rx_ring->qid, rc); xdp_rxq_info_unreg(&rx_ring->xdp_rxq); } err: return rc; } static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring) { xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq); xdp_rxq_info_unreg(&rx_ring->xdp_rxq); } static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter, struct bpf_prog *prog, int first, int count) { struct bpf_prog *old_bpf_prog; struct ena_ring *rx_ring; int i = 0; for (i = first; i < count; i++) { rx_ring = &adapter->rx_ring[i]; old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog); if (!old_bpf_prog && prog) { ena_xdp_register_rxq_info(rx_ring); rx_ring->rx_headroom = XDP_PACKET_HEADROOM; } else if (old_bpf_prog && !prog) { ena_xdp_unregister_rxq_info(rx_ring); rx_ring->rx_headroom = NET_SKB_PAD; } } } static void ena_xdp_exchange_program(struct ena_adapter *adapter, struct bpf_prog *prog) { struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog); ena_xdp_exchange_program_rx_in_range(adapter, prog, 0, adapter->num_io_queues); if (old_bpf_prog) bpf_prog_put(old_bpf_prog); } static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter) { bool was_up; int rc; was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); if (was_up) ena_down(adapter); adapter->xdp_first_ring = 0; adapter->xdp_num_queues = 0; ena_xdp_exchange_program(adapter, NULL); if (was_up) { rc = ena_up(adapter); if (rc) return rc; } return 0; } static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf) { struct ena_adapter *adapter = netdev_priv(netdev); struct bpf_prog *prog = bpf->prog; struct bpf_prog *old_bpf_prog; int rc, prev_mtu; bool is_up; is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); rc = ena_xdp_allowed(adapter); if (rc == ENA_XDP_ALLOWED) { old_bpf_prog = adapter->xdp_bpf_prog; if (prog) { if (!is_up) { ena_init_all_xdp_queues(adapter); } else if (!old_bpf_prog) { ena_down(adapter); ena_init_all_xdp_queues(adapter); } ena_xdp_exchange_program(adapter, prog); if (is_up && !old_bpf_prog) { rc = ena_up(adapter); if (rc) return rc; } xdp_features_set_redirect_target(netdev, false); } else if (old_bpf_prog) { xdp_features_clear_redirect_target(netdev); rc = ena_destroy_and_free_all_xdp_queues(adapter); if (rc) return rc; } prev_mtu = netdev->max_mtu; netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu; if (!old_bpf_prog) netif_info(adapter, drv, adapter->netdev, "XDP program is set, changing the max_mtu from %d to %d", prev_mtu, netdev->max_mtu); } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) { netif_err(adapter, drv, adapter->netdev, "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on", netdev->mtu, ENA_XDP_MAX_MTU); NL_SET_ERR_MSG_MOD(bpf->extack, "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. 
Check the dmesg for more info"); return -EINVAL; } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) { netif_err(adapter, drv, adapter->netdev, "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n", adapter->num_io_queues, adapter->max_num_io_queues); NL_SET_ERR_MSG_MOD(bpf->extack, "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info"); return -EINVAL; } return 0; } /* This is the main xdp callback, it's used by the kernel to set/unset the xdp * program as well as to query the current xdp program id. */ static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf) { switch (bpf->command) { case XDP_SETUP_PROG: return ena_xdp_set(netdev, bpf); default: return -EINVAL; } return 0; } static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter) { #ifdef CONFIG_RFS_ACCEL u32 i; int rc; adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues); if (!adapter->netdev->rx_cpu_rmap) return -ENOMEM; for (i = 0; i < adapter->num_io_queues; i++) { int irq_idx = ENA_IO_IRQ_IDX(i); rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, pci_irq_vector(adapter->pdev, irq_idx)); if (rc) { free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); adapter->netdev->rx_cpu_rmap = NULL; return rc; } } #endif /* CONFIG_RFS_ACCEL */ return 0; } static void ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, u16 qid) { ring->qid = qid; ring->pdev = adapter->pdev; ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; ring->napi = &adapter->ena_napi[qid].napi; ring->adapter = adapter; ring->ena_dev = adapter->ena_dev; ring->per_napi_packets = 0; ring->cpu = 0; ring->numa_node = 0; ring->no_interrupt_event_cnt = 0; u64_stats_init(&ring->syncp); } static void ena_init_io_rings(struct ena_adapter *adapter, int first_index, int count) { struct ena_com_dev *ena_dev; struct ena_ring *txr, *rxr; int i; ena_dev = adapter->ena_dev; for (i = first_index; i < first_index + count; i++) { txr = &adapter->tx_ring[i]; rxr = &adapter->rx_ring[i]; /* TX common ring state */ ena_init_io_rings_common(adapter, txr, i); /* TX specific ring state */ txr->ring_size = adapter->requested_tx_ring_size; txr->tx_max_header_size = ena_dev->tx_max_header_size; txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; txr->sgl_size = adapter->max_tx_sgl_size; txr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); txr->disable_meta_caching = adapter->disable_meta_caching; spin_lock_init(&txr->xdp_tx_lock); /* Don't init RX queues for xdp queues */ if (!ENA_IS_XDP_INDEX(adapter, i)) { /* RX common ring state */ ena_init_io_rings_common(adapter, rxr, i); /* RX specific ring state */ rxr->ring_size = adapter->requested_rx_ring_size; rxr->rx_copybreak = adapter->rx_copybreak; rxr->sgl_size = adapter->max_rx_sgl_size; rxr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); rxr->empty_rx_queue = 0; rxr->rx_headroom = NET_SKB_PAD; adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues]; } } } /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Return 0 on success, negative on failure */ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) { struct ena_ring *tx_ring = &adapter->tx_ring[qid]; struct ena_irq *ena_irq = 
&adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; int size, i, node; if (tx_ring->tx_buffer_info) { netif_err(adapter, ifup, adapter->netdev, "tx_buffer_info info is not NULL"); return -EEXIST; } size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; node = cpu_to_node(ena_irq->cpu); tx_ring->tx_buffer_info = vzalloc_node(size, node); if (!tx_ring->tx_buffer_info) { tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) goto err_tx_buffer_info; } size = sizeof(u16) * tx_ring->ring_size; tx_ring->free_ids = vzalloc_node(size, node); if (!tx_ring->free_ids) { tx_ring->free_ids = vzalloc(size); if (!tx_ring->free_ids) goto err_tx_free_ids; } size = tx_ring->tx_max_header_size; tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); if (!tx_ring->push_buf_intermediate_buf) { tx_ring->push_buf_intermediate_buf = vzalloc(size); if (!tx_ring->push_buf_intermediate_buf) goto err_push_buf_intermediate_buf; } /* Req id ring for TX out of order completions */ for (i = 0; i < tx_ring->ring_size; i++) tx_ring->free_ids[i] = i; /* Reset tx statistics */ memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; tx_ring->cpu = ena_irq->cpu; tx_ring->numa_node = node; return 0; err_push_buf_intermediate_buf: vfree(tx_ring->free_ids); tx_ring->free_ids = NULL; err_tx_free_ids: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; err_tx_buffer_info: return -ENOMEM; } /* ena_free_tx_resources - Free I/O Tx Resources per Queue * @adapter: network interface device structure * @qid: queue index * * Free all transmit software resources */ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) { struct ena_ring *tx_ring = &adapter->tx_ring[qid]; vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; vfree(tx_ring->free_ids); tx_ring->free_ids = NULL; vfree(tx_ring->push_buf_intermediate_buf); tx_ring->push_buf_intermediate_buf = NULL; } static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter, int first_index, int count) { int i, rc = 0; for (i = first_index; i < first_index + count; i++) { rc = ena_setup_tx_resources(adapter, i); if (rc) goto err_setup_tx; } return 0; err_setup_tx: netif_err(adapter, ifup, adapter->netdev, "Tx queue %d: allocation failed\n", i); /* rewind the index freeing the rings as we go */ while (first_index < i--) ena_free_tx_resources(adapter, i); return rc; } static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, int first_index, int count) { int i; for (i = first_index; i < first_index + count; i++) ena_free_tx_resources(adapter, i); } /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources */ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter) { ena_free_all_io_tx_resources_in_range(adapter, 0, adapter->xdp_num_queues + adapter->num_io_queues); } /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Returns 0 on success, negative on failure */ static int ena_setup_rx_resources(struct ena_adapter *adapter, u32 qid) { struct ena_ring *rx_ring = &adapter->rx_ring[qid]; struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; int size, node, i; if (rx_ring->rx_buffer_info) { netif_err(adapter, ifup, adapter->netdev, "rx_buffer_info is not NULL"); return -EEXIST; } /* alloc extra element so in rx path * we can always prefetch rx_info + 1 */ size = 
sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); node = cpu_to_node(ena_irq->cpu); rx_ring->rx_buffer_info = vzalloc_node(size, node); if (!rx_ring->rx_buffer_info) { rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) return -ENOMEM; } size = sizeof(u16) * rx_ring->ring_size; rx_ring->free_ids = vzalloc_node(size, node); if (!rx_ring->free_ids) { rx_ring->free_ids = vzalloc(size); if (!rx_ring->free_ids) { vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; return -ENOMEM; } } /* Req id ring for receiving RX pkts out of order */ for (i = 0; i < rx_ring->ring_size; i++) rx_ring->free_ids[i] = i; /* Reset rx statistics */ memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; rx_ring->cpu = ena_irq->cpu; rx_ring->numa_node = node; return 0; } /* ena_free_rx_resources - Free I/O Rx Resources * @adapter: network interface device structure * @qid: queue index * * Free all receive software resources */ static void ena_free_rx_resources(struct ena_adapter *adapter, u32 qid) { struct ena_ring *rx_ring = &adapter->rx_ring[qid]; vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; vfree(rx_ring->free_ids); rx_ring->free_ids = NULL; } /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues * @adapter: board private structure * * Return 0 on success, negative on failure */ static int ena_setup_all_rx_resources(struct ena_adapter *adapter) { int i, rc = 0; for (i = 0; i < adapter->num_io_queues; i++) { rc = ena_setup_rx_resources(adapter, i); if (rc) goto err_setup_rx; } return 0; err_setup_rx: netif_err(adapter, ifup, adapter->netdev, "Rx queue %d: allocation failed\n", i); /* rewind the index freeing the rings as we go */ while (i--) ena_free_rx_resources(adapter, i); return rc; } /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources */ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_io_queues; i++) ena_free_rx_resources(adapter, i); } static struct page *ena_alloc_map_page(struct ena_ring *rx_ring, dma_addr_t *dma) { struct page *page; /* This would allocate the page on the same NUMA node the executing code * is running on. 
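 * (This helper is reached both from the initial ring fill in
 * ena_refill_all_rx_bufs() and from the Rx NAPI poll path, so pages usually
 * end up local to the CPU servicing the queue.)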
*/ page = dev_alloc_page(); if (!page) { ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp); return ERR_PTR(-ENOSPC); } /* To enable NIC-side port-mirroring, AKA SPAN port, * we make the buffer readable from the nic as well */ *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) { ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, &rx_ring->syncp); __free_page(page); return ERR_PTR(-EIO); } return page; } static int ena_alloc_rx_buffer(struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { int headroom = rx_ring->rx_headroom; struct ena_com_buf *ena_buf; struct page *page; dma_addr_t dma; int tailroom; /* restore page offset value in case it has been changed by device */ rx_info->buf_offset = headroom; /* if previous allocated page is not used */ if (unlikely(rx_info->page)) return 0; /* We handle DMA here */ page = ena_alloc_map_page(rx_ring, &dma); if (unlikely(IS_ERR(page))) return PTR_ERR(page); netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "Allocate page %p, rx_info %p\n", page, rx_info); tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); rx_info->page = page; rx_info->dma_addr = dma; rx_info->page_offset = 0; ena_buf = &rx_info->ena_buf; ena_buf->paddr = dma + headroom; ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom; return 0; } static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info, unsigned long attrs) { dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); } static void ena_free_rx_page(struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { struct page *page = rx_info->page; if (unlikely(!page)) { netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, "Trying to free unallocated buffer\n"); return; } ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0); __free_page(page); rx_info->page = NULL; } static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) { u16 next_to_use, req_id; u32 i; int rc; next_to_use = rx_ring->next_to_use; for (i = 0; i < num; i++) { struct ena_rx_buffer *rx_info; req_id = rx_ring->free_ids[next_to_use]; rx_info = &rx_ring->rx_buffer_info[req_id]; rc = ena_alloc_rx_buffer(rx_ring, rx_info); if (unlikely(rc < 0)) { netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, "Failed to allocate buffer for rx queue %d\n", rx_ring->qid); break; } rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, &rx_info->ena_buf, req_id); if (unlikely(rc)) { netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, "Failed to add buffer for rx queue %d\n", rx_ring->qid); break; } next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, rx_ring->ring_size); } if (unlikely(i < num)) { ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, &rx_ring->syncp); netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, "Refilled rx qid %d with only %d buffers (from %d)\n", rx_ring->qid, i, num); } /* ena_com_write_sq_doorbell issues a wmb() */ if (likely(i)) ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); rx_ring->next_to_use = next_to_use; return i; } static void ena_free_rx_bufs(struct ena_adapter *adapter, u32 qid) { struct ena_ring *rx_ring = &adapter->rx_ring[qid]; u32 i; for (i = 0; i < rx_ring->ring_size; i++) { struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; if (rx_info->page) ena_free_rx_page(rx_ring, rx_info); } } /* ena_refill_all_rx_bufs - allocate all queues Rx buffers * @adapter: board private structure */ static void ena_refill_all_rx_bufs(struct ena_adapter 
*adapter) { struct ena_ring *rx_ring; int i, rc, bufs_num; for (i = 0; i < adapter->num_io_queues; i++) { rx_ring = &adapter->rx_ring[i]; bufs_num = rx_ring->ring_size - 1; rc = ena_refill_rx_bufs(rx_ring, bufs_num); if (unlikely(rc != bufs_num)) netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, "Refilling Queue %d failed. allocated %d buffers from: %d\n", i, rc, bufs_num); } } static void ena_free_all_rx_bufs(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_io_queues; i++) ena_free_rx_bufs(adapter, i); } static void ena_unmap_tx_buff(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info) { struct ena_com_buf *ena_buf; u32 cnt; int i; ena_buf = tx_info->bufs; cnt = tx_info->num_of_bufs; if (unlikely(!cnt)) return; if (tx_info->map_linear_data) { dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); ena_buf++; cnt--; } /* unmap remaining mapped pages */ for (i = 0; i < cnt; i++) { dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); ena_buf++; } } /* ena_free_tx_bufs - Free Tx Buffers per Queue * @tx_ring: TX ring for which buffers be freed */ static void ena_free_tx_bufs(struct ena_ring *tx_ring) { bool print_once = true; u32 i; for (i = 0; i < tx_ring->ring_size; i++) { struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; if (!tx_info->skb) continue; if (print_once) { netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev, "Free uncompleted tx skb qid %d idx 0x%x\n", tx_ring->qid, i); print_once = false; } else { netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev, "Free uncompleted tx skb qid %d idx 0x%x\n", tx_ring->qid, i); } ena_unmap_tx_buff(tx_ring, tx_info); dev_kfree_skb_any(tx_info->skb); } netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid)); } static void ena_free_all_tx_bufs(struct ena_adapter *adapter) { struct ena_ring *tx_ring; int i; for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { tx_ring = &adapter->tx_ring[i]; ena_free_tx_bufs(tx_ring); } } static void ena_destroy_all_tx_queues(struct ena_adapter *adapter) { u16 ena_qid; int i; for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { ena_qid = ENA_IO_TXQ_IDX(i); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) { u16 ena_qid; int i; for (i = 0; i < adapter->num_io_queues; i++) { ena_qid = ENA_IO_RXQ_IDX(i); cancel_work_sync(&adapter->ena_napi[i].dim.work); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } static void ena_destroy_all_io_queues(struct ena_adapter *adapter) { ena_destroy_all_tx_queues(adapter); ena_destroy_all_rx_queues(adapter); } static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, struct ena_tx_buffer *tx_info, bool is_xdp) { if (tx_info) netif_err(ring->adapter, tx_done, ring->netdev, "tx_info doesn't have valid %s. qid %u req_id %u", is_xdp ? 
"xdp frame" : "skb", ring->qid, req_id); else netif_err(ring->adapter, tx_done, ring->netdev, "Invalid req_id %u in qid %u\n", req_id, ring->qid); ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp); ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID); return -EFAULT; } static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) { struct ena_tx_buffer *tx_info; tx_info = &tx_ring->tx_buffer_info[req_id]; if (likely(tx_info->skb)) return 0; return handle_invalid_req_id(tx_ring, req_id, tx_info, false); } static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id) { struct ena_tx_buffer *tx_info; tx_info = &xdp_ring->tx_buffer_info[req_id]; if (likely(tx_info->xdpf)) return 0; return handle_invalid_req_id(xdp_ring, req_id, tx_info, true); } static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) { struct netdev_queue *txq; bool above_thresh; u32 tx_bytes = 0; u32 total_done = 0; u16 next_to_clean; u16 req_id; int tx_pkts = 0; int rc; next_to_clean = tx_ring->next_to_clean; txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); while (tx_pkts < budget) { struct ena_tx_buffer *tx_info; struct sk_buff *skb; rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); if (rc) { if (unlikely(rc == -EINVAL)) handle_invalid_req_id(tx_ring, req_id, NULL, false); break; } /* validate that the request id points to a valid skb */ rc = validate_tx_req_id(tx_ring, req_id); if (rc) break; tx_info = &tx_ring->tx_buffer_info[req_id]; skb = tx_info->skb; /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */ prefetch(&skb->end); tx_info->skb = NULL; tx_info->last_jiffies = 0; ena_unmap_tx_buff(tx_ring, tx_info); netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, "tx_poll: q %d skb %p completed\n", tx_ring->qid, skb); tx_bytes += skb->len; dev_kfree_skb(skb); tx_pkts++; total_done += tx_info->tx_descs; tx_ring->free_ids[next_to_clean] = req_id; next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, tx_ring->ring_size); } tx_ring->next_to_clean = next_to_clean; ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, "tx_poll: q %d done. total pkts: %d\n", tx_ring->qid, tx_pkts); /* need to make the rings circular update visible to * ena_start_xmit() before checking for netif_queue_stopped(). */ smp_mb(); above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, ENA_TX_WAKEUP_THRESH); if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) { __netif_tx_lock(txq, smp_processor_id()); above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, ENA_TX_WAKEUP_THRESH); if (netif_tx_queue_stopped(txq) && above_thresh && test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { netif_tx_wake_queue(txq); ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, &tx_ring->syncp); } __netif_tx_unlock(txq); } return tx_pkts; } static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len) { struct sk_buff *skb; if (!first_frag) skb = napi_alloc_skb(rx_ring->napi, len); else skb = napi_build_skb(first_frag, len); if (unlikely(!skb)) { ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "Failed to allocate skb. first_frag %s\n", first_frag ? 
"provided" : "not provided"); } return skb; } static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len, u16 len, int pkt_offset) { struct ena_com_buf *ena_buf = &rx_info->ena_buf; /* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer * for data + headroom + tailroom. */ if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) { page_ref_inc(rx_info->page); rx_info->page_offset += buf_len; ena_buf->paddr += buf_len; ena_buf->len -= buf_len; return true; } return false; } static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs, u32 descs, u16 *next_to_clean) { int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); bool is_xdp_loaded = ena_xdp_present_ring(rx_ring); struct ena_rx_buffer *rx_info; struct ena_adapter *adapter; int page_offset, pkt_offset; dma_addr_t pre_reuse_paddr; u16 len, req_id, buf = 0; bool reuse_rx_buf_page; struct sk_buff *skb; void *buf_addr; int buf_offset; u16 buf_len; len = ena_bufs[buf].len; req_id = ena_bufs[buf].req_id; rx_info = &rx_ring->rx_buffer_info[req_id]; if (unlikely(!rx_info->page)) { adapter = rx_ring->adapter; netif_err(adapter, rx_err, rx_ring->netdev, "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id); ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID); return NULL; } netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "rx_info %p page %p\n", rx_info, rx_info->page); buf_offset = rx_info->buf_offset; pkt_offset = buf_offset - rx_ring->rx_headroom; page_offset = rx_info->page_offset; buf_addr = page_address(rx_info->page) + page_offset; if (len <= rx_ring->rx_copybreak) { skb = ena_alloc_skb(rx_ring, NULL, len); if (unlikely(!skb)) return NULL; /* sync this buffer for CPU use */ dma_sync_single_for_cpu(rx_ring->dev, dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, len, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, buf_addr + buf_offset, len); dma_sync_single_for_device(rx_ring->dev, dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, len, DMA_FROM_DEVICE); skb_put(skb, len); netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "RX allocated small packet. len %d.\n", skb->len); skb->protocol = eth_type_trans(skb, rx_ring->netdev); rx_ring->free_ids[*next_to_clean] = req_id; *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs, rx_ring->ring_size); return skb; } buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom); pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr); /* If XDP isn't loaded try to reuse part of the RX buffer */ reuse_rx_buf_page = !is_xdp_loaded && ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset); dma_sync_single_for_cpu(rx_ring->dev, pre_reuse_paddr + pkt_offset, len, DMA_FROM_DEVICE); if (!reuse_rx_buf_page) ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); skb = ena_alloc_skb(rx_ring, buf_addr, buf_len); if (unlikely(!skb)) return NULL; /* Populate skb's linear part */ skb_reserve(skb, buf_offset); skb_put(skb, len); skb->protocol = eth_type_trans(skb, rx_ring->netdev); do { netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "RX skb updated. len %d. 
data_len %d\n", skb->len, skb->data_len); if (!reuse_rx_buf_page) rx_info->page = NULL; rx_ring->free_ids[*next_to_clean] = req_id; *next_to_clean = ENA_RX_RING_IDX_NEXT(*next_to_clean, rx_ring->ring_size); if (likely(--descs == 0)) break; buf++; len = ena_bufs[buf].len; req_id = ena_bufs[buf].req_id; rx_info = &rx_ring->rx_buffer_info[req_id]; /* rx_info->buf_offset includes rx_ring->rx_headroom */ buf_offset = rx_info->buf_offset; pkt_offset = buf_offset - rx_ring->rx_headroom; buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom); page_offset = rx_info->page_offset; pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr); reuse_rx_buf_page = !is_xdp_loaded && ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset); dma_sync_single_for_cpu(rx_ring->dev, pre_reuse_paddr + pkt_offset, len, DMA_FROM_DEVICE); if (!reuse_rx_buf_page) ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, page_offset + buf_offset, len, buf_len); } while (1); return skb; } /* ena_rx_checksum - indicate in skb if hw indicated a good cksum * @adapter: structure containing adapter specific data * @ena_rx_ctx: received packet context/metadata * @skb: skb currently being received and modified */ static void ena_rx_checksum(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, struct sk_buff *skb) { /* Rx csum disabled */ if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { skb->ip_summed = CHECKSUM_NONE; return; } /* For fragmented packets the checksum isn't valid */ if (ena_rx_ctx->frag) { skb->ip_summed = CHECKSUM_NONE; return; } /* if IP and error */ if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && (ena_rx_ctx->l3_csum_err))) { /* ipv4 checksum error */ skb->ip_summed = CHECKSUM_NONE; ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX IPv4 header checksum error\n"); return; } /* if TCP/UDP */ if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { if (unlikely(ena_rx_ctx->l4_csum_err)) { /* TCP/UDP checksum error */ ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX L4 checksum error\n"); skb->ip_summed = CHECKSUM_NONE; return; } if (likely(ena_rx_ctx->l4_csum_checked)) { skb->ip_summed = CHECKSUM_UNNECESSARY; ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, &rx_ring->syncp); } else { ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, &rx_ring->syncp); skb->ip_summed = CHECKSUM_NONE; } } else { skb->ip_summed = CHECKSUM_NONE; return; } } static void ena_set_rx_hash(struct ena_ring *rx_ring, struct ena_com_rx_ctx *ena_rx_ctx, struct sk_buff *skb) { enum pkt_hash_types hash_type; if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) hash_type = PKT_HASH_TYPE_L4; else hash_type = PKT_HASH_TYPE_NONE; /* Override hash type if the packet is fragmented */ if (ena_rx_ctx->frag) hash_type = PKT_HASH_TYPE_NONE; skb_set_hash(skb, ena_rx_ctx->hash, hash_type); } } static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) { struct ena_rx_buffer *rx_info; int ret; rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; xdp_prepare_buff(xdp, page_address(rx_info->page), rx_info->buf_offset, rx_ring->ena_bufs[0].len, false); /* If for some reason we 
received a bigger packet than * we expect, then we simply drop it */ if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) return ENA_XDP_DROP; ret = ena_xdp_execute(rx_ring, xdp); /* The xdp program might expand the headers */ if (ret == ENA_XDP_PASS) { rx_info->buf_offset = xdp->data - xdp->data_hard_start; rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; } return ret; } /* ena_clean_rx_irq - Cleanup RX irq * @rx_ring: RX ring to clean * @napi: napi handler * @budget: how many packets driver is allowed to clean * * Returns the number of cleaned buffers. */ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, u32 budget) { u16 next_to_clean = rx_ring->next_to_clean; struct ena_com_rx_ctx ena_rx_ctx; struct ena_rx_buffer *rx_info; struct ena_adapter *adapter; u32 res_budget, work_done; int rx_copybreak_pkt = 0; int refill_threshold; struct sk_buff *skb; int refill_required; struct xdp_buff xdp; int xdp_flags = 0; int total_len = 0; int xdp_verdict; int rc = 0; int i; netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "%s qid %d\n", __func__, rx_ring->qid); res_budget = budget; xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); do { xdp_verdict = ENA_XDP_PASS; skb = NULL; ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; ena_rx_ctx.max_bufs = rx_ring->sgl_size; ena_rx_ctx.descs = 0; ena_rx_ctx.pkt_offset = 0; rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, rx_ring->ena_com_io_sq, &ena_rx_ctx); if (unlikely(rc)) goto error; if (unlikely(ena_rx_ctx.descs == 0)) break; /* First descriptor might have an offset set by the device */ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; rx_info->buf_offset += ena_rx_ctx.pkt_offset; netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, ena_rx_ctx.l4_proto, ena_rx_ctx.hash); if (ena_xdp_present_ring(rx_ring)) xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); /* allocate skb and fill it */ if (xdp_verdict == ENA_XDP_PASS) skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs, &next_to_clean); if (unlikely(!skb)) { for (i = 0; i < ena_rx_ctx.descs; i++) { int req_id = rx_ring->ena_bufs[i].req_id; rx_ring->free_ids[next_to_clean] = req_id; next_to_clean = ENA_RX_RING_IDX_NEXT(next_to_clean, rx_ring->ring_size); /* Packets was passed for transmission, unmap it * from RX side. 
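 * (That is, the XDP_TX/XDP_REDIRECT outcomes of ena_xdp_execute(): page
 * ownership has moved to the Tx path, so the Rx ring only unmaps the buffer
 * and forgets the page pointer here.)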
*/ if (xdp_verdict & ENA_XDP_FORWARDED) { ena_unmap_rx_buff_attrs(rx_ring, &rx_ring->rx_buffer_info[req_id], 0); rx_ring->rx_buffer_info[req_id].page = NULL; } } if (xdp_verdict != ENA_XDP_PASS) { xdp_flags |= xdp_verdict; total_len += ena_rx_ctx.ena_bufs[0].len; res_budget--; continue; } break; } ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); skb_record_rx_queue(skb, rx_ring->qid); if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) rx_copybreak_pkt++; total_len += skb->len; napi_gro_receive(napi, skb); res_budget--; } while (likely(res_budget)); work_done = budget - res_budget; rx_ring->per_napi_packets += work_done; u64_stats_update_begin(&rx_ring->syncp); rx_ring->rx_stats.bytes += total_len; rx_ring->rx_stats.cnt += work_done; rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; u64_stats_update_end(&rx_ring->syncp); rx_ring->next_to_clean = next_to_clean; refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); refill_threshold = min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, ENA_RX_REFILL_THRESH_PACKET); /* Optimization, try to batch new rx buffers */ if (refill_required > refill_threshold) { ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); ena_refill_rx_bufs(rx_ring, refill_required); } if (xdp_flags & ENA_XDP_REDIRECT) xdp_do_flush_map(); return work_done; error: if (xdp_flags & ENA_XDP_REDIRECT) xdp_do_flush(); adapter = netdev_priv(rx_ring->netdev); if (rc == -ENOSPC) { ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp); ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS); } else { ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID); } return 0; } static void ena_dim_work(struct work_struct *w) { struct dim *dim = container_of(w, struct dim, work); struct dim_cq_moder cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim); ena_napi->rx_ring->smoothed_interval = cur_moder.usec; dim->state = DIM_START_MEASURE; } static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi) { struct dim_sample dim_sample; struct ena_ring *rx_ring = ena_napi->rx_ring; if (!rx_ring->per_napi_packets) return; rx_ring->non_empty_napi_events++; dim_update_sample(rx_ring->non_empty_napi_events, rx_ring->rx_stats.cnt, rx_ring->rx_stats.bytes, &dim_sample); net_dim(&ena_napi->dim, dim_sample); rx_ring->per_napi_packets = 0; } static void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring) { u32 rx_interval = tx_ring->smoothed_interval; struct ena_eth_io_intr_reg intr_reg; /* Rx ring can be NULL when for XDP tx queues which don't have an * accompanying rx_ring pair. */ if (rx_ring) rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? rx_ring->smoothed_interval : ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); /* Update intr register: rx intr delay, * tx intr delay and interrupt unmask */ ena_com_update_intr_reg(&intr_reg, rx_interval, tx_ring->smoothed_interval, true); ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, &tx_ring->syncp); /* It is a shared MSI-X. * Tx and Rx CQ have pointer to it. 
* So we use one of them to reach the intr reg * The Tx ring is used because the rx_ring is NULL for XDP queues */ ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); } static void ena_update_ring_numa_node(struct ena_ring *tx_ring, struct ena_ring *rx_ring) { int cpu = get_cpu(); int numa_node; /* Check only one ring since the 2 rings are running on the same cpu */ if (likely(tx_ring->cpu == cpu)) goto out; tx_ring->cpu = cpu; if (rx_ring) rx_ring->cpu = cpu; numa_node = cpu_to_node(cpu); if (likely(tx_ring->numa_node == numa_node)) goto out; put_cpu(); if (numa_node != NUMA_NO_NODE) { ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); tx_ring->numa_node = numa_node; if (rx_ring) { rx_ring->numa_node = numa_node; ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node); } } return; out: put_cpu(); } static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget) { u32 total_done = 0; u16 next_to_clean; int tx_pkts = 0; u16 req_id; int rc; if (unlikely(!xdp_ring)) return 0; next_to_clean = xdp_ring->next_to_clean; while (tx_pkts < budget) { struct ena_tx_buffer *tx_info; struct xdp_frame *xdpf; rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq, &req_id); if (rc) { if (unlikely(rc == -EINVAL)) handle_invalid_req_id(xdp_ring, req_id, NULL, true); break; } /* validate that the request id points to a valid xdp_frame */ rc = validate_xdp_req_id(xdp_ring, req_id); if (rc) break; tx_info = &xdp_ring->tx_buffer_info[req_id]; xdpf = tx_info->xdpf; tx_info->xdpf = NULL; tx_info->last_jiffies = 0; ena_unmap_tx_buff(xdp_ring, tx_info); netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, "tx_poll: q %d skb %p completed\n", xdp_ring->qid, xdpf); tx_pkts++; total_done += tx_info->tx_descs; xdp_return_frame(xdpf); xdp_ring->free_ids[next_to_clean] = req_id; next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, xdp_ring->ring_size); } xdp_ring->next_to_clean = next_to_clean; ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done); ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq); netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, "tx_poll: q %d done. total pkts: %d\n", xdp_ring->qid, tx_pkts); return tx_pkts; } static int ena_io_poll(struct napi_struct *napi, int budget) { struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); struct ena_ring *tx_ring, *rx_ring; int tx_work_done; int rx_work_done = 0; int tx_budget; int napi_comp_call = 0; int ret; tx_ring = ena_napi->tx_ring; rx_ring = ena_napi->rx_ring; tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { napi_complete_done(napi, 0); return 0; } tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); /* On netpoll the budget is zero and the handler should only clean the * tx completions. 
 */
	if (likely(budget))
		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmasking the
	 * interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when scheduled
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done) &&
		    READ_ONCE(ena_napi->interrupts_masked)) {
			smp_rmb(); /* make sure interrupts_masked is read */
			WRITE_ONCE(ena_napi->interrupts_masked, false);
			/* We apply adaptive moderation on Rx path only.
			 * Tx uses static interrupt moderation.
			 */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_adaptive_rx_intr_moderation(ena_napi);

			ena_update_ring_numa_node(tx_ring, rx_ring);
			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	tx_ring->tx_stats.last_napi_jiffies = jiffies;

	return ret;
}

static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	/* Used to check HW health */
	WRITE_ONCE(ena_napi->first_interrupt, true);

	WRITE_ONCE(ena_napi->interrupts_masked, true);
	smp_wmb(); /* write interrupts_masked before calling napi */

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}

/* Reserve a single MSI-X vector for management (admin + aenq), plus one
 * vector for each potential io queue.
 * The number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserve the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "Trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. 
irq_cnt %d\n", irq_cnt); return -ENOSPC; } if (irq_cnt != msix_vecs) { netif_notice(adapter, probe, adapter->netdev, "Enable only %d MSI-X (out of %d), reduce the number of queues\n", irq_cnt, msix_vecs); adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC; } if (ena_init_rx_cpu_rmap(adapter)) netif_warn(adapter, probe, adapter->netdev, "Failed to map IRQs to CPUs\n"); adapter->msix_vecs = irq_cnt; set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); return 0; } static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) { u32 cpu; snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", pci_name(adapter->pdev)); adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = ena_intr_msix_mgmnt; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX); cpu = cpumask_first(cpu_online_mask); adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; cpumask_set_cpu(cpu, &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask); } static void ena_setup_io_intr(struct ena_adapter *adapter) { struct net_device *netdev; int irq_idx, i, cpu; int io_queue_count; netdev = adapter->netdev; io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; for (i = 0; i < io_queue_count; i++) { irq_idx = ENA_IO_IRQ_IDX(i); cpu = i % num_online_cpus(); snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, "%s-Tx-Rx-%d", netdev->name, i); adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; adapter->irq_tbl[irq_idx].vector = pci_irq_vector(adapter->pdev, irq_idx); adapter->irq_tbl[irq_idx].cpu = cpu; cpumask_set_cpu(cpu, &adapter->irq_tbl[irq_idx].affinity_hint_mask); } } static int ena_request_mgmnt_irq(struct ena_adapter *adapter) { unsigned long flags = 0; struct ena_irq *irq; int rc; irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; rc = request_irq(irq->vector, irq->handler, flags, irq->name, irq->data); if (rc) { netif_err(adapter, probe, adapter->netdev, "Failed to request admin irq\n"); return rc; } netif_dbg(adapter, probe, adapter->netdev, "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n", irq->affinity_hint_mask.bits[0], irq->vector); irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); return rc; } static int ena_request_io_irq(struct ena_adapter *adapter) { u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; unsigned long flags = 0; struct ena_irq *irq; int rc = 0, i, k; if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { netif_err(adapter, ifup, adapter->netdev, "Failed to request I/O IRQ: MSI-X is not enabled\n"); return -EINVAL; } for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { irq = &adapter->irq_tbl[i]; rc = request_irq(irq->vector, irq->handler, flags, irq->name, irq->data); if (rc) { netif_err(adapter, ifup, adapter->netdev, "Failed to request I/O IRQ. index %d rc %d\n", i, rc); goto err; } netif_dbg(adapter, ifup, adapter->netdev, "Set affinity hint of irq. 
index %d to 0x%lx (irq vector: %d)\n", i, irq->affinity_hint_mask.bits[0], irq->vector); irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); } return rc; err: for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) { irq = &adapter->irq_tbl[k]; free_irq(irq->vector, irq->data); } return rc; } static void ena_free_mgmnt_irq(struct ena_adapter *adapter) { struct ena_irq *irq; irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; synchronize_irq(irq->vector); irq_set_affinity_hint(irq->vector, NULL); free_irq(irq->vector, irq->data); } static void ena_free_io_irq(struct ena_adapter *adapter) { u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; struct ena_irq *irq; int i; #ifdef CONFIG_RFS_ACCEL if (adapter->msix_vecs >= 1) { free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); adapter->netdev->rx_cpu_rmap = NULL; } #endif /* CONFIG_RFS_ACCEL */ for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { irq = &adapter->irq_tbl[i]; irq_set_affinity_hint(irq->vector, NULL); free_irq(irq->vector, irq->data); } } static void ena_disable_msix(struct ena_adapter *adapter) { if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) pci_free_irq_vectors(adapter->pdev); } static void ena_disable_io_intr_sync(struct ena_adapter *adapter) { u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; int i; if (!netif_running(adapter->netdev)) return; for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) synchronize_irq(adapter->irq_tbl[i].vector); } static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index, int count) { int i; for (i = first_index; i < first_index + count; i++) { netif_napi_del(&adapter->ena_napi[i].napi); WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) && adapter->ena_napi[i].xdp_ring); } } static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index, int count) { int i; for (i = first_index; i < first_index + count; i++) { struct ena_napi *napi = &adapter->ena_napi[i]; netif_napi_add(adapter->netdev, &napi->napi, ENA_IS_XDP_INDEX(adapter, i) ? 
ena_xdp_io_poll : ena_io_poll); if (!ENA_IS_XDP_INDEX(adapter, i)) { napi->rx_ring = &adapter->rx_ring[i]; napi->tx_ring = &adapter->tx_ring[i]; } else { napi->xdp_ring = &adapter->tx_ring[i]; } napi->qid = i; } } static void ena_napi_disable_in_range(struct ena_adapter *adapter, int first_index, int count) { int i; for (i = first_index; i < first_index + count; i++) napi_disable(&adapter->ena_napi[i].napi); } static void ena_napi_enable_in_range(struct ena_adapter *adapter, int first_index, int count) { int i; for (i = first_index; i < first_index + count; i++) napi_enable(&adapter->ena_napi[i].napi); } /* Configure the Rx forwarding */ static int ena_rss_configure(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; /* In case the RSS table wasn't initialized by probe */ if (!ena_dev->rss.tbl_log_size) { rc = ena_rss_init_default(adapter); if (rc && (rc != -EOPNOTSUPP)) { netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc); return rc; } } /* Set indirect table */ rc = ena_com_indirect_table_set(ena_dev); if (unlikely(rc && rc != -EOPNOTSUPP)) return rc; /* Configure hash function (if supported) */ rc = ena_com_set_hash_function(ena_dev); if (unlikely(rc && (rc != -EOPNOTSUPP))) return rc; /* Configure hash inputs (if supported) */ rc = ena_com_set_hash_ctrl(ena_dev); if (unlikely(rc && (rc != -EOPNOTSUPP))) return rc; return 0; } static int ena_up_complete(struct ena_adapter *adapter) { int rc; rc = ena_rss_configure(adapter); if (rc) return rc; ena_change_mtu(adapter->netdev, adapter->netdev->mtu); ena_refill_all_rx_bufs(adapter); /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); ena_napi_enable_in_range(adapter, 0, adapter->xdp_num_queues + adapter->num_io_queues); return 0; } static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) { struct ena_com_create_io_ctx ctx; struct ena_com_dev *ena_dev; struct ena_ring *tx_ring; u32 msix_vector; u16 ena_qid; int rc; ena_dev = adapter->ena_dev; tx_ring = &adapter->tx_ring[qid]; msix_vector = ENA_IO_IRQ_IDX(qid); ena_qid = ENA_IO_TXQ_IDX(qid); memset(&ctx, 0x0, sizeof(ctx)); ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; ctx.qid = ena_qid; ctx.mem_queue_type = ena_dev->tx_mem_queue_type; ctx.msix_vector = msix_vector; ctx.queue_size = tx_ring->ring_size; ctx.numa_node = tx_ring->numa_node; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc) { netif_err(adapter, ifup, adapter->netdev, "Failed to create I/O TX queue num %d rc: %d\n", qid, rc); return rc; } rc = ena_com_get_io_handlers(ena_dev, ena_qid, &tx_ring->ena_com_io_sq, &tx_ring->ena_com_io_cq); if (rc) { netif_err(adapter, ifup, adapter->netdev, "Failed to get TX queue handlers. 
TX queue num %d rc: %d\n", qid, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); return rc; } ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); return rc; } static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, int first_index, int count) { struct ena_com_dev *ena_dev = adapter->ena_dev; int rc, i; for (i = first_index; i < first_index + count; i++) { rc = ena_create_io_tx_queue(adapter, i); if (rc) goto create_err; } return 0; create_err: while (i-- > first_index) ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); return rc; } static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) { struct ena_com_dev *ena_dev; struct ena_com_create_io_ctx ctx; struct ena_ring *rx_ring; u32 msix_vector; u16 ena_qid; int rc; ena_dev = adapter->ena_dev; rx_ring = &adapter->rx_ring[qid]; msix_vector = ENA_IO_IRQ_IDX(qid); ena_qid = ENA_IO_RXQ_IDX(qid); memset(&ctx, 0x0, sizeof(ctx)); ctx.qid = ena_qid; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; ctx.msix_vector = msix_vector; ctx.queue_size = rx_ring->ring_size; ctx.numa_node = rx_ring->numa_node; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc) { netif_err(adapter, ifup, adapter->netdev, "Failed to create I/O RX queue num %d rc: %d\n", qid, rc); return rc; } rc = ena_com_get_io_handlers(ena_dev, ena_qid, &rx_ring->ena_com_io_sq, &rx_ring->ena_com_io_cq); if (rc) { netif_err(adapter, ifup, adapter->netdev, "Failed to get RX queue handlers. RX queue num %d rc: %d\n", qid, rc); goto err; } ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); return rc; err: ena_com_destroy_io_queue(ena_dev, ena_qid); return rc; } static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; int rc, i; for (i = 0; i < adapter->num_io_queues; i++) { rc = ena_create_io_rx_queue(adapter, i); if (rc) goto create_err; INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); } return 0; create_err: while (i--) { cancel_work_sync(&adapter->ena_napi[i].dim.work); ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); } return rc; } static void set_io_rings_size(struct ena_adapter *adapter, int new_tx_size, int new_rx_size) { int i; for (i = 0; i < adapter->num_io_queues; i++) { adapter->tx_ring[i].ring_size = new_tx_size; adapter->rx_ring[i].ring_size = new_rx_size; } } /* This function allows queue allocation to backoff when the system is * low on memory. If there is not enough memory to allocate io queues * the driver will try to allocate smaller queues. * * The backoff algorithm is as follows: * 1. Try to allocate TX and RX and if successful. * 1.1. return success * * 2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same). * * 3. If TX or RX is smaller than 256 * 3.1. return failure. * 4. else * 4.1. go back to 1. */ static int create_queues_with_size_backoff(struct ena_adapter *adapter) { int rc, cur_rx_ring_size, cur_tx_ring_size; int new_rx_ring_size, new_tx_ring_size; /* current queue sizes might be set to smaller than the requested * ones due to past queue allocation failures. 
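	 *
	 * Illustrative walk-through of the backoff described above: with
	 * requested sizes TX=1024/RX=4096 and repeated -ENOMEM failures, the
	 * attempted sizes would be 1024/4096 -> 1024/2048 -> 1024/1024 ->
	 * 512/512 -> 256/256, after which the next halving would fall below
	 * the 256-entry minimum mentioned above and the driver gives up.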
*/ set_io_rings_size(adapter, adapter->requested_tx_ring_size, adapter->requested_rx_ring_size); while (1) { if (ena_xdp_present(adapter)) { rc = ena_setup_and_create_all_xdp_queues(adapter); if (rc) goto err_setup_tx; } rc = ena_setup_tx_resources_in_range(adapter, 0, adapter->num_io_queues); if (rc) goto err_setup_tx; rc = ena_create_io_tx_queues_in_range(adapter, 0, adapter->num_io_queues); if (rc) goto err_create_tx_queues; rc = ena_setup_all_rx_resources(adapter); if (rc) goto err_setup_rx; rc = ena_create_all_io_rx_queues(adapter); if (rc) goto err_create_rx_queues; return 0; err_create_rx_queues: ena_free_all_io_rx_resources(adapter); err_setup_rx: ena_destroy_all_tx_queues(adapter); err_create_tx_queues: ena_free_all_io_tx_resources(adapter); err_setup_tx: if (rc != -ENOMEM) { netif_err(adapter, ifup, adapter->netdev, "Queue creation failed with error code %d\n", rc); return rc; } cur_tx_ring_size = adapter->tx_ring[0].ring_size; cur_rx_ring_size = adapter->rx_ring[0].ring_size; netif_err(adapter, ifup, adapter->netdev, "Not enough memory to create queues with sizes TX=%d, RX=%d\n", cur_tx_ring_size, cur_rx_ring_size); new_tx_ring_size = cur_tx_ring_size; new_rx_ring_size = cur_rx_ring_size; /* Decrease the size of the larger queue, or * decrease both if they are the same size. */ if (cur_rx_ring_size <= cur_tx_ring_size) new_tx_ring_size = cur_tx_ring_size / 2; if (cur_rx_ring_size >= cur_tx_ring_size) new_rx_ring_size = cur_rx_ring_size / 2; if (new_tx_ring_size < ENA_MIN_RING_SIZE || new_rx_ring_size < ENA_MIN_RING_SIZE) { netif_err(adapter, ifup, adapter->netdev, "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n", ENA_MIN_RING_SIZE); return rc; } netif_err(adapter, ifup, adapter->netdev, "Retrying queue creation with sizes TX=%d, RX=%d\n", new_tx_ring_size, new_rx_ring_size); set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size); } } static int ena_up(struct ena_adapter *adapter) { int io_queue_count, rc, i; netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; ena_setup_io_intr(adapter); /* napi poll functions should be initialized before running * request_irq(), to handle a rare condition where there is a pending * interrupt, causing the ISR to fire immediately while the poll * function wasn't set yet, causing a null dereference */ ena_init_napi_in_range(adapter, 0, io_queue_count); rc = ena_request_io_irq(adapter); if (rc) goto err_req_irq; rc = create_queues_with_size_backoff(adapter); if (rc) goto err_create_queues_with_backoff; rc = ena_up_complete(adapter); if (rc) goto err_up; if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) netif_carrier_on(adapter->netdev); ena_increase_stat(&adapter->dev_stats.interface_up, 1, &adapter->syncp); set_bit(ENA_FLAG_DEV_UP, &adapter->flags); /* Enable completion queues interrupt */ for (i = 0; i < adapter->num_io_queues; i++) ena_unmask_interrupt(&adapter->tx_ring[i], &adapter->rx_ring[i]); /* schedule napi in case we had pending packets * from the last time we disable napi */ for (i = 0; i < io_queue_count; i++) napi_schedule(&adapter->ena_napi[i].napi); return rc; err_up: ena_destroy_all_tx_queues(adapter); ena_free_all_io_tx_resources(adapter); ena_destroy_all_rx_queues(adapter); ena_free_all_io_rx_resources(adapter); err_create_queues_with_backoff: ena_free_io_irq(adapter); err_req_irq: ena_del_napi_in_range(adapter, 0, io_queue_count); return rc; } static void ena_down(struct 
ena_adapter *adapter) { int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); ena_increase_stat(&adapter->dev_stats.interface_down, 1, &adapter->syncp); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); /* After this point the napi handler won't enable the tx queue */ ena_napi_disable_in_range(adapter, 0, io_queue_count); /* After destroy the queue there won't be any new interrupts */ if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { int rc; rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); if (rc) netif_err(adapter, ifdown, adapter->netdev, "Device reset failed\n"); /* stop submitting admin commands on a device that was reset */ ena_com_set_admin_running_state(adapter->ena_dev, false); } ena_destroy_all_io_queues(adapter); ena_disable_io_intr_sync(adapter); ena_free_io_irq(adapter); ena_del_napi_in_range(adapter, 0, io_queue_count); ena_free_all_tx_bufs(adapter); ena_free_all_rx_bufs(adapter); ena_free_all_io_tx_resources(adapter); ena_free_all_io_rx_resources(adapter); } /* ena_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. */ static int ena_open(struct net_device *netdev) { struct ena_adapter *adapter = netdev_priv(netdev); int rc; /* Notify the stack of the actual queue counts. */ rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues); if (rc) { netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); return rc; } rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues); if (rc) { netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); return rc; } rc = ena_up(adapter); if (rc) return rc; return rc; } /* ena_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. 
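 *
 * Note that ena_close() is also invoked internally when ring sizes or the
 * queue count are changed at runtime (see ena_update_queue_params() and
 * ena_update_queue_count() below), not only through the normal ifdown path.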
*/ static int ena_close(struct net_device *netdev) { struct ena_adapter *adapter = netdev_priv(netdev); netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) return 0; if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) ena_down(adapter); /* Check for device status and issue reset if needed*/ check_for_admin_com_state(adapter); if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { netif_err(adapter, ifdown, adapter->netdev, "Destroy failure, restarting device\n"); ena_dump_stats_to_dmesg(adapter); /* rtnl lock already obtained in dev_ioctl() layer */ ena_destroy_device(adapter, false); ena_restore_device(adapter); } return 0; } int ena_update_queue_params(struct ena_adapter *adapter, u32 new_tx_size, u32 new_rx_size, u32 new_llq_header_len) { bool dev_was_up, large_llq_changed = false; int rc = 0; dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); ena_close(adapter->netdev); adapter->requested_tx_ring_size = new_tx_size; adapter->requested_rx_ring_size = new_rx_size; ena_init_io_rings(adapter, 0, adapter->xdp_num_queues + adapter->num_io_queues); large_llq_changed = adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV; large_llq_changed &= new_llq_header_len != adapter->ena_dev->tx_max_header_size; /* a check that the configuration is valid is done by caller */ if (large_llq_changed) { adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled; ena_destroy_device(adapter, false); rc = ena_restore_device(adapter); } return dev_was_up && !rc ? ena_up(adapter) : rc; } int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak) { struct ena_ring *rx_ring; int i; if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE)) return -EINVAL; adapter->rx_copybreak = rx_copybreak; for (i = 0; i < adapter->num_io_queues; i++) { rx_ring = &adapter->rx_ring[i]; rx_ring->rx_copybreak = rx_copybreak; } return 0; } int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count) { struct ena_com_dev *ena_dev = adapter->ena_dev; int prev_channel_count; bool dev_was_up; dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); ena_close(adapter->netdev); prev_channel_count = adapter->num_io_queues; adapter->num_io_queues = new_channel_count; if (ena_xdp_present(adapter) && ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) { adapter->xdp_first_ring = new_channel_count; adapter->xdp_num_queues = new_channel_count; if (prev_channel_count > new_channel_count) ena_xdp_exchange_program_rx_in_range(adapter, NULL, new_channel_count, prev_channel_count); else ena_xdp_exchange_program_rx_in_range(adapter, adapter->xdp_bpf_prog, prev_channel_count, new_channel_count); } /* We need to destroy the rss table so that the indirection * table will be reinitialized by ena_up() */ ena_com_rss_destroy(ena_dev); ena_init_io_rings(adapter, 0, adapter->xdp_num_queues + adapter->num_io_queues); return dev_was_up ? 
ena_open(adapter->netdev) : 0; } static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb, bool disable_meta_caching) { u32 mss = skb_shinfo(skb)->gso_size; struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; u8 l4_protocol = 0; if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { ena_tx_ctx->l4_csum_enable = 1; if (mss) { ena_tx_ctx->tso_enable = 1; ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; ena_tx_ctx->l4_csum_partial = 0; } else { ena_tx_ctx->tso_enable = 0; ena_meta->l4_hdr_len = 0; ena_tx_ctx->l4_csum_partial = 1; } switch (ip_hdr(skb)->version) { case IPVERSION: ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; if (ip_hdr(skb)->frag_off & htons(IP_DF)) ena_tx_ctx->df = 1; if (mss) ena_tx_ctx->l3_csum_enable = 1; l4_protocol = ip_hdr(skb)->protocol; break; case 6: ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; l4_protocol = ipv6_hdr(skb)->nexthdr; break; default: break; } if (l4_protocol == IPPROTO_TCP) ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; else ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; ena_meta->mss = mss; ena_meta->l3_hdr_len = skb_network_header_len(skb); ena_meta->l3_hdr_offset = skb_network_offset(skb); ena_tx_ctx->meta_valid = 1; } else if (disable_meta_caching) { memset(ena_meta, 0, sizeof(*ena_meta)); ena_tx_ctx->meta_valid = 1; } else { ena_tx_ctx->meta_valid = 0; } } static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, struct sk_buff *skb) { int num_frags, header_len, rc; num_frags = skb_shinfo(skb)->nr_frags; header_len = skb_headlen(skb); if (num_frags < tx_ring->sgl_size) return 0; if ((num_frags == tx_ring->sgl_size) && (header_len < tx_ring->tx_max_header_size)) return 0; ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); rc = skb_linearize(skb); if (unlikely(rc)) { ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, &tx_ring->syncp); } return rc; } static int ena_tx_map_skb(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info, struct sk_buff *skb, void **push_hdr, u16 *header_len) { struct ena_adapter *adapter = tx_ring->adapter; struct ena_com_buf *ena_buf; dma_addr_t dma; u32 skb_head_len, frag_len, last_frag; u16 push_len = 0; u16 delta = 0; int i = 0; skb_head_len = skb_headlen(skb); tx_info->skb = skb; ena_buf = tx_info->bufs; if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { /* When the device is LLQ mode, the driver will copy * the header into the device memory space. * the ena_com layer assume the header is in a linear * memory space. * This assumption might be wrong since part of the header * can be in the fragmented buffers. * Use skb_header_pointer to make sure the header is in a * linear memory space. 
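		 * Illustrative example (numbers assumed): with
		 * tx_max_header_size = 96, skb->len = 200 and skb_headlen() = 64,
		 * push_len below becomes 96 and delta becomes 32, so the first
		 * 32 bytes of the first fragment are already part of the pushed
		 * header and are skipped when the fragments are DMA-mapped.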
*/ push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); *push_hdr = skb_header_pointer(skb, 0, push_len, tx_ring->push_buf_intermediate_buf); *header_len = push_len; if (unlikely(skb->data != *push_hdr)) { ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, &tx_ring->syncp); delta = push_len - skb_head_len; } } else { *push_hdr = NULL; *header_len = min_t(u32, skb_head_len, tx_ring->tx_max_header_size); } netif_dbg(adapter, tx_queued, adapter->netdev, "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, *push_hdr, push_len); if (skb_head_len > push_len) { dma = dma_map_single(tx_ring->dev, skb->data + push_len, skb_head_len - push_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(tx_ring->dev, dma))) goto error_report_dma_error; ena_buf->paddr = dma; ena_buf->len = skb_head_len - push_len; ena_buf++; tx_info->num_of_bufs++; tx_info->map_linear_data = 1; } else { tx_info->map_linear_data = 0; } last_frag = skb_shinfo(skb)->nr_frags; for (i = 0; i < last_frag; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; frag_len = skb_frag_size(frag); if (unlikely(delta >= frag_len)) { delta -= frag_len; continue; } dma = skb_frag_dma_map(tx_ring->dev, frag, delta, frag_len - delta, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(tx_ring->dev, dma))) goto error_report_dma_error; ena_buf->paddr = dma; ena_buf->len = frag_len - delta; ena_buf++; tx_info->num_of_bufs++; delta = 0; } return 0; error_report_dma_error: ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, &tx_ring->syncp); netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n"); tx_info->skb = NULL; tx_info->num_of_bufs += i; ena_unmap_tx_buff(tx_ring, tx_info); return -EINVAL; } /* Called with netif_tx_lock. */ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ena_adapter *adapter = netdev_priv(dev); struct ena_tx_buffer *tx_info; struct ena_com_tx_ctx ena_tx_ctx; struct ena_ring *tx_ring; struct netdev_queue *txq; void *push_hdr; u16 next_to_use, req_id, header_len; int qid, rc; netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); /* Determine which tx ring we will be placed on */ qid = skb_get_queue_mapping(skb); tx_ring = &adapter->tx_ring[qid]; txq = netdev_get_tx_queue(dev, qid); rc = ena_check_and_linearize_skb(tx_ring, skb); if (unlikely(rc)) goto error_drop_packet; skb_tx_timestamp(skb); next_to_use = tx_ring->next_to_use; req_id = tx_ring->free_ids[next_to_use]; tx_info = &tx_ring->tx_buffer_info[req_id]; tx_info->num_of_bufs = 0; WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); if (unlikely(rc)) goto error_drop_packet; memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); ena_tx_ctx.ena_bufs = tx_info->bufs; ena_tx_ctx.push_header = push_hdr; ena_tx_ctx.num_bufs = tx_info->num_of_bufs; ena_tx_ctx.req_id = req_id; ena_tx_ctx.header_len = header_len; /* set flags and meta data */ ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); rc = ena_xmit_common(dev, tx_ring, tx_info, &ena_tx_ctx, next_to_use, skb->len); if (rc) goto error_unmap_dma; netdev_tx_sent_queue(txq, skb->len); /* stop the queue when no more space available, the packet can have up * to sgl_size + 2. one for the meta descriptor and one for header * (if the header is larger than tx_max_header_size). 
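	 * For example (hypothetical numbers): with sgl_size = 17 the queue is
	 * stopped unless at least 19 descriptors are free, and it is woken
	 * again below once ENA_TX_WAKEUP_THRESH descriptors become available.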
	 */
	if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						   tx_ring->sgl_size + 2))) {
		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
			  __func__, qid);

		netif_tx_stop_queue(txq);
		ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
				  &tx_ring->syncp);

		/* There is a rare condition where this function decides to
		 * stop the queue but meanwhile clean_tx_irq updates
		 * next_to_completion and terminates.
		 * The queue will remain stopped forever.
		 * To solve this issue add a mb() to make sure that
		 * netif_tx_stop_queue() write is visible before checking if
		 * there is additional space in the queue.
		 */
		smp_mb();

		if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						 ENA_TX_WAKEUP_THRESH)) {
			netif_tx_wake_queue(txq);
			ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
					  &tx_ring->syncp);
		}
	}

	if (netif_xmit_stopped(txq) || !netdev_xmit_more())
		/* trigger the dma engine. ena_ring_tx_doorbell()
		 * calls a memory barrier inside it.
		 */
		ena_ring_tx_doorbell(tx_ring);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(tx_ring, tx_info);
	tx_info->skb = NULL;

error_drop_packet:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	u16 qid;
	/* we suspect that this is good for in-kernel network services that
	 * want to loop incoming skb rx to tx in normal user generated
	 * traffic; most probably we will not get to this path.
	 */
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
		qid = netdev_pick_tx(dev, skb, NULL);

	return qid;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		dev_err(dev, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->bdf = pci_dev_id(pdev);
	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strscpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_GEN_MAJOR) |
		(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
		("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
	host_info->num_cpus = num_online_cpus();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
		ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
		ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
		ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			dev_warn(dev, "Cannot set host attributes\n");
		else
			dev_err(dev, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev, "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		netif_err(adapter, drv, adapter->netdev,
			  "Cannot allocate debug area\n");
return; } rc = ena_com_set_host_attributes(adapter->ena_dev); if (rc) { if (rc == -EOPNOTSUPP) netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n"); else netif_err(adapter, drv, adapter->netdev, "Cannot set host attributes\n"); goto err; } return; err: ena_com_delete_debug_area(adapter->ena_dev); } int ena_update_hw_stats(struct ena_adapter *adapter) { int rc; rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats); if (rc) { netdev_err(adapter->netdev, "Failed to get ENI stats\n"); return rc; } return 0; } static void ena_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ena_adapter *adapter = netdev_priv(netdev); struct ena_ring *rx_ring, *tx_ring; unsigned int start; u64 rx_drops; u64 tx_drops; int i; if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) return; for (i = 0; i < adapter->num_io_queues; i++) { u64 bytes, packets; tx_ring = &adapter->tx_ring[i]; do { start = u64_stats_fetch_begin(&tx_ring->syncp); packets = tx_ring->tx_stats.cnt; bytes = tx_ring->tx_stats.bytes; } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; rx_ring = &adapter->rx_ring[i]; do { start = u64_stats_fetch_begin(&rx_ring->syncp); packets = rx_ring->rx_stats.cnt; bytes = rx_ring->rx_stats.bytes; } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; } do { start = u64_stats_fetch_begin(&adapter->syncp); rx_drops = adapter->dev_stats.rx_drops; tx_drops = adapter->dev_stats.tx_drops; } while (u64_stats_fetch_retry(&adapter->syncp, start)); stats->rx_dropped = rx_drops; stats->tx_dropped = tx_drops; stats->multicast = 0; stats->collisions = 0; stats->rx_length_errors = 0; stats->rx_crc_errors = 0; stats->rx_frame_errors = 0; stats->rx_fifo_errors = 0; stats->rx_missed_errors = 0; stats->tx_window_errors = 0; stats->rx_errors = 0; stats->tx_errors = 0; } static const struct net_device_ops ena_netdev_ops = { .ndo_open = ena_open, .ndo_stop = ena_close, .ndo_start_xmit = ena_start_xmit, .ndo_select_queue = ena_select_queue, .ndo_get_stats64 = ena_get_stats64, .ndo_tx_timeout = ena_tx_timeout, .ndo_change_mtu = ena_change_mtu, .ndo_set_mac_address = NULL, .ndo_validate_addr = eth_validate_addr, .ndo_bpf = ena_xdp, .ndo_xdp_xmit = ena_xdp_xmit, }; static void ena_calc_io_queue_size(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq; struct ena_com_dev *ena_dev = adapter->ena_dev; u32 tx_queue_size = ENA_DEFAULT_RING_SIZE; u32 rx_queue_size = ENA_DEFAULT_RING_SIZE; u32 max_tx_queue_size; u32 max_rx_queue_size; /* If this function is called after driver load, the ring sizes have already * been configured. Take it into account when recalculating ring size. 
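	 * (For example, when ena_restore_device() re-runs ena_device_init()
	 * after a reset, the rings keep their previously configured sizes.)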
*/ if (adapter->tx_ring->ring_size) tx_queue_size = adapter->tx_ring->ring_size; if (adapter->rx_ring->ring_size) rx_queue_size = adapter->rx_ring->ring_size; if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = &get_feat_ctx->max_queue_ext.max_queue_ext; max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth, max_queue_ext->max_rx_sq_depth); max_tx_queue_size = max_queue_ext->max_tx_cq_depth; if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) max_tx_queue_size = min_t(u32, max_tx_queue_size, llq->max_llq_depth); else max_tx_queue_size = min_t(u32, max_tx_queue_size, max_queue_ext->max_tx_sq_depth); adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, max_queue_ext->max_per_packet_tx_descs); adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, max_queue_ext->max_per_packet_rx_descs); } else { struct ena_admin_queue_feature_desc *max_queues = &get_feat_ctx->max_queues; max_rx_queue_size = min_t(u32, max_queues->max_cq_depth, max_queues->max_sq_depth); max_tx_queue_size = max_queues->max_cq_depth; if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) max_tx_queue_size = min_t(u32, max_tx_queue_size, llq->max_llq_depth); else max_tx_queue_size = min_t(u32, max_tx_queue_size, max_queues->max_sq_depth); adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, max_queues->max_packet_tx_descs); adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, max_queues->max_packet_rx_descs); } max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size); max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size); /* When forcing large headers, we multiply the entry size by 2, and therefore divide * the queue size by 2, leaving the amount of memory used by the queues unchanged. 
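	 * (Illustrative arithmetic: 1024 entries of 128 B and 512 entries of
	 * 256 B both occupy 128 KiB of LLQ memory per queue.)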
*/ if (adapter->large_llq_header_enabled) { if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { max_tx_queue_size /= 2; dev_info(&adapter->pdev->dev, "Forcing large headers and decreasing maximum TX queue size to %d\n", max_tx_queue_size); } else { dev_err(&adapter->pdev->dev, "Forcing large headers failed: LLQ is disabled or device does not support large headers\n"); adapter->large_llq_header_enabled = false; } } tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, max_tx_queue_size); rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, max_rx_queue_size); tx_queue_size = rounddown_pow_of_two(tx_queue_size); rx_queue_size = rounddown_pow_of_two(rx_queue_size); adapter->max_tx_ring_size = max_tx_queue_size; adapter->max_rx_ring_size = max_rx_queue_size; adapter->requested_tx_ring_size = tx_queue_size; adapter->requested_rx_ring_size = rx_queue_size; } static int ena_device_validate_params(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { struct net_device *netdev = adapter->netdev; int rc; rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr); if (!rc) { netif_err(adapter, drv, netdev, "Error, mac address are different\n"); return -EINVAL; } if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { netif_err(adapter, drv, netdev, "Error, device max mtu is smaller than netdev MTU\n"); return -EINVAL; } return 0; } static void set_default_llq_configurations(struct ena_adapter *adapter, struct ena_llq_configurations *llq_config, struct ena_admin_feature_llq_desc *llq) { struct ena_com_dev *ena_dev = adapter->ena_dev; llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; adapter->large_llq_header_supported = !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ)); adapter->large_llq_header_supported &= !!(llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B); if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && adapter->large_llq_header_enabled) { llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B; llq_config->llq_ring_entry_size_value = 256; } else { llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; llq_config->llq_ring_entry_size_value = 128; } } static int ena_set_queues_placement_policy(struct pci_dev *pdev, struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq, struct ena_llq_configurations *llq_default_configurations) { int rc; u32 llq_feature_mask; llq_feature_mask = 1 << ENA_ADMIN_LLQ; if (!(ena_dev->supported_features & llq_feature_mask)) { dev_warn(&pdev->dev, "LLQ is not supported Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return 0; } if (!ena_dev->mem_bar) { netdev_err(ena_dev->net_device, "LLQ is advertised as supported but device doesn't expose mem bar\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return 0; } rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); if (unlikely(rc)) { dev_err(&pdev->dev, "Failed to configure the device mode. 
Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; } return 0; } static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev, int bars) { bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR)); if (!has_mem_bar) return 0; ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, pci_resource_start(pdev, ENA_MEM_BAR), pci_resource_len(pdev, ENA_MEM_BAR)); if (!ena_dev->mem_bar) return -EFAULT; return 0; } static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev, struct ena_com_dev_get_features_ctx *get_feat_ctx, bool *wd_state) { struct ena_com_dev *ena_dev = adapter->ena_dev; struct ena_llq_configurations llq_config; struct device *dev = &pdev->dev; bool readless_supported; u32 aenq_groups; int dma_width; int rc; rc = ena_com_mmio_reg_read_request_init(ena_dev); if (rc) { dev_err(dev, "Failed to init mmio read less\n"); return rc; } /* The PCIe configuration space revision id indicate if mmio reg * read is disabled */ readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); ena_com_set_mmio_read_mode(ena_dev, readless_supported); rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); if (rc) { dev_err(dev, "Can not reset device\n"); goto err_mmio_read_less; } rc = ena_com_validate_version(ena_dev); if (rc) { dev_err(dev, "Device version is too low\n"); goto err_mmio_read_less; } dma_width = ena_com_get_dma_width(ena_dev); if (dma_width < 0) { dev_err(dev, "Invalid dma width value %d", dma_width); rc = dma_width; goto err_mmio_read_less; } rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width)); if (rc) { dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc); goto err_mmio_read_less; } /* ENA admin level init */ rc = ena_com_admin_init(ena_dev, &aenq_handlers); if (rc) { dev_err(dev, "Can not initialize ena admin queue with device\n"); goto err_mmio_read_less; } /* To enable the msix interrupts the driver needs to know the number * of queues. 
So the driver uses polling mode to retrieve this * information */ ena_com_set_admin_polling_mode(ena_dev, true); ena_config_host_info(ena_dev, pdev); /* Get Device Attributes*/ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); if (rc) { dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc); goto err_admin_init; } /* Try to turn all the available aenq groups */ aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | BIT(ENA_ADMIN_FATAL_ERROR) | BIT(ENA_ADMIN_WARNING) | BIT(ENA_ADMIN_NOTIFICATION) | BIT(ENA_ADMIN_KEEP_ALIVE); aenq_groups &= get_feat_ctx->aenq.supported_groups; rc = ena_com_set_aenq_config(ena_dev, aenq_groups); if (rc) { dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc); goto err_admin_init; } *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq); rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, &llq_config); if (rc) { dev_err(dev, "ENA device init failed\n"); goto err_admin_init; } ena_calc_io_queue_size(adapter, get_feat_ctx); return 0; err_admin_init: ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); err_mmio_read_less: ena_com_mmio_reg_read_request_destroy(ena_dev); return rc; } static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; struct device *dev = &adapter->pdev->dev; int rc; rc = ena_enable_msix(adapter); if (rc) { dev_err(dev, "Can not reserve msix vectors\n"); return rc; } ena_setup_mgmnt_intr(adapter); rc = ena_request_mgmnt_irq(adapter); if (rc) { dev_err(dev, "Can not setup management interrupts\n"); goto err_disable_msix; } ena_com_set_admin_polling_mode(ena_dev, false); ena_com_admin_aenq_enable(ena_dev); return 0; err_disable_msix: ena_disable_msix(adapter); return rc; } static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) { struct net_device *netdev = adapter->netdev; struct ena_com_dev *ena_dev = adapter->ena_dev; bool dev_up; if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) return; netif_carrier_off(netdev); del_timer_sync(&adapter->timer_service); dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); adapter->dev_up_before_reset = dev_up; if (!graceful) ena_com_set_admin_running_state(ena_dev, false); if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) ena_down(adapter); /* Stop the device from sending AENQ events (in case reset flag is set * and device is up, ena_down() already reset the device. 
*/ if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); ena_com_mmio_reg_read_request_destroy(ena_dev); /* return reset reason to default value */ adapter->reset_reason = ENA_REGS_RESET_NORMAL; clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); } static int ena_restore_device(struct ena_adapter *adapter) { struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_com_dev *ena_dev = adapter->ena_dev; struct pci_dev *pdev = adapter->pdev; struct ena_ring *txr; int rc, count, i; bool wd_state; set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state); if (rc) { dev_err(&pdev->dev, "Can not initialize device\n"); goto err; } adapter->wd_state = wd_state; count = adapter->xdp_num_queues + adapter->num_io_queues; for (i = 0 ; i < count; i++) { txr = &adapter->tx_ring[i]; txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; txr->tx_max_header_size = ena_dev->tx_max_header_size; } rc = ena_device_validate_params(adapter, &get_feat_ctx); if (rc) { dev_err(&pdev->dev, "Validation of device parameters failed\n"); goto err_device_destroy; } rc = ena_enable_msix_and_set_admin_interrupts(adapter); if (rc) { dev_err(&pdev->dev, "Enable MSI-X failed\n"); goto err_device_destroy; } /* If the interface was up before the reset bring it up */ if (adapter->dev_up_before_reset) { rc = ena_up(adapter); if (rc) { dev_err(&pdev->dev, "Failed to create I/O queues\n"); goto err_disable_msix; } } set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) netif_carrier_on(adapter->netdev); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); adapter->last_keep_alive_jiffies = jiffies; return rc; err_disable_msix: ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_device_destroy: ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); ena_com_mmio_reg_read_request_destroy(ena_dev); err: clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); dev_err(&pdev->dev, "Reset attempt failed. Can not reset the device\n"); return rc; } static void ena_fw_reset_device(struct work_struct *work) { struct ena_adapter *adapter = container_of(work, struct ena_adapter, reset_task); rtnl_lock(); if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { ena_destroy_device(adapter, false); ena_restore_device(adapter); dev_err(&adapter->pdev->dev, "Device reset completed successfully\n"); } rtnl_unlock(); } static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, struct ena_ring *rx_ring) { struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); if (likely(READ_ONCE(ena_napi->first_interrupt))) return 0; if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) return 0; rx_ring->no_interrupt_event_cnt++; if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { netif_err(adapter, rx_err, adapter->netdev, "Potential MSIX issue on Rx side Queue = %d. 
Reset the device\n", rx_ring->qid); ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT); return -EIO; } return 0; } static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, struct ena_ring *tx_ring) { struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); unsigned int time_since_last_napi; unsigned int missing_tx_comp_to; bool is_tx_comp_time_expired; struct ena_tx_buffer *tx_buf; unsigned long last_jiffies; u32 missed_tx = 0; int i, rc = 0; for (i = 0; i < tx_ring->ring_size; i++) { tx_buf = &tx_ring->tx_buffer_info[i]; last_jiffies = tx_buf->last_jiffies; if (last_jiffies == 0) /* no pending Tx at this location */ continue; is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies + 2 * adapter->missing_tx_completion_to); if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) { /* If after graceful period interrupt is still not * received, we schedule a reset */ netif_err(adapter, tx_err, adapter->netdev, "Potential MSIX issue on Tx side Queue = %d. Reset the device\n", tx_ring->qid); ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT); return -EIO; } is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to); if (unlikely(is_tx_comp_time_expired)) { if (!tx_buf->print_once) { time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to); netif_notice(adapter, tx_err, adapter->netdev, "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n", tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to); } tx_buf->print_once = 1; missed_tx++; } } if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { netif_err(adapter, tx_err, adapter->netdev, "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", missed_tx, adapter->missing_tx_completion_threshold); ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL); rc = -EIO; } ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, &tx_ring->syncp); return rc; } static void check_for_missing_completions(struct ena_adapter *adapter) { struct ena_ring *tx_ring; struct ena_ring *rx_ring; int i, budget, rc; int io_queue_count; io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues; /* Make sure the driver doesn't turn the device in other process */ smp_rmb(); if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) return; if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) return; if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) return; budget = ENA_MONITORED_TX_QUEUES; for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) { tx_ring = &adapter->tx_ring[i]; rx_ring = &adapter->rx_ring[i]; rc = check_missing_comp_in_tx_queue(adapter, tx_ring); if (unlikely(rc)) return; rc = !ENA_IS_XDP_INDEX(adapter, i) ? check_for_rx_interrupt_queue(adapter, rx_ring) : 0; if (unlikely(rc)) return; budget--; if (!budget) break; } adapter->last_monitored_tx_qid = i % io_queue_count; } /* trigger napi schedule after 2 consecutive detections */ #define EMPTY_RX_REFILL 2 /* For the rare case where the device runs out of Rx descriptors and the * napi handler failed to refill new Rx descriptors (due to a lack of memory * for example). 
 * This case will lead to a deadlock:
 * The device won't send interrupts since all the new Rx packets will be dropped
 * The napi handler won't allocate new Rx descriptors so the device won't be
 * able to send new packets.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 * It is recommended to have at least 512MB, with a minimum of 128MB for a
 * constrained environment.
 *
 * When such a situation is detected, reschedule napi.
 */
static void check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, refill_required;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			rx_ring->empty_rx_queue++;

			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
						  &rx_ring->syncp);

				netif_err(adapter, drv, adapter->netdev,
					  "Trigger refill for ring %d\n", i);

				napi_schedule(rx_ring->napi);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}

/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	unsigned long keep_alive_expired;

	if (!adapter->wd_state)
		return;

	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
		return;

	keep_alive_expired = adapter->last_keep_alive_jiffies +
			     adapter->keep_alive_timeout;
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
		ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
				  &adapter->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
	}
}

static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
		netif_err(adapter, drv, adapter->netdev,
			  "ENA admin queue is not in running state!\n");
		ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
				  &adapter->syncp);
		ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
	}
}

static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	struct net_device *netdev = adapter->netdev;

	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev->admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev->mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->missed_tx_completion_count_threshold_to_reset)
		adapter->missing_tx_completion_threshold =
			hints->missed_tx_completion_count_threshold_to_reset;

	if (hints->missing_tx_completion_timeout) {
		if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->missing_tx_completion_to =
				msecs_to_jiffies(hints->missing_tx_completion_timeout);
	}

	if (hints->netdev_wd_timeout)
		netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			adapter->keep_alive_timeout =
				msecs_to_jiffies(hints->driver_watchdog_timeout);
	}
}

static void ena_update_host_info(struct ena_admin_host_info *host_info,
				 struct net_device *netdev)
{
	host_info->supported_network_features[0] = netdev->features &
		GENMASK_ULL(31, 0);
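	/* netdev->features is a 64-bit mask; host_info carries it as two
	 * 32-bit words, low word first.
	 */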
host_info->supported_network_features[1] = (netdev->features & GENMASK_ULL(63, 32)) >> 32; } static void ena_timer_service(struct timer_list *t) { struct ena_adapter *adapter = from_timer(adapter, t, timer_service); u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; struct ena_admin_host_info *host_info = adapter->ena_dev->host_attr.host_info; check_for_missing_keep_alive(adapter); check_for_admin_com_state(adapter); check_for_missing_completions(adapter); check_for_empty_rx_ring(adapter); if (debug_area) ena_dump_stats_to_buf(adapter, debug_area); if (host_info) ena_update_host_info(host_info, adapter->netdev); if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { netif_err(adapter, drv, adapter->netdev, "Trigger reset is on\n"); ena_dump_stats_to_dmesg(adapter); queue_work(ena_wq, &adapter->reset_task); return; } /* Reset the timer */ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); } static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, struct ena_com_dev *ena_dev, struct ena_com_dev_get_features_ctx *get_feat_ctx) { u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = &get_feat_ctx->max_queue_ext.max_queue_ext; io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num, max_queue_ext->max_rx_cq_num); io_tx_sq_num = max_queue_ext->max_tx_sq_num; io_tx_cq_num = max_queue_ext->max_tx_cq_num; } else { struct ena_admin_queue_feature_desc *max_queues = &get_feat_ctx->max_queues; io_tx_sq_num = max_queues->max_sq_num; io_tx_cq_num = max_queues->max_cq_num; io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num); } /* In case of LLQ use the llq fields for the tx SQ/CQ */ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) io_tx_sq_num = get_feat_ctx->llq.max_llq_num; max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num); max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num); max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num); /* 1 IRQ for mgmnt and 1 IRQs for each IO direction */ max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); return max_num_io_queues; } static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, struct net_device *netdev) { netdev_features_t dev_features = 0; /* Set offload features */ if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) dev_features |= NETIF_F_IP_CSUM; if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) dev_features |= NETIF_F_IPV6_CSUM; if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) dev_features |= NETIF_F_TSO; if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) dev_features |= NETIF_F_TSO6; if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) dev_features |= NETIF_F_TSO_ECN; if (feat->offload.rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) dev_features |= NETIF_F_RXCSUM; if (feat->offload.rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) dev_features |= NETIF_F_RXCSUM; netdev->features = dev_features | NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_HIGHDMA; netdev->hw_features |= netdev->features; netdev->vlan_features |= netdev->features; } static void ena_set_conf_feat_params(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *feat) { struct net_device *netdev = 
adapter->netdev; /* Copy mac address */ if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { eth_hw_addr_random(netdev); ether_addr_copy(adapter->mac_addr, netdev->dev_addr); } else { ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); eth_hw_addr_set(netdev, adapter->mac_addr); } /* Set offload features */ ena_set_dev_offloads(feat, netdev); adapter->max_mtu = feat->dev_attr.max_mtu; netdev->max_mtu = adapter->max_mtu; netdev->min_mtu = ENA_MIN_MTU; } static int ena_rss_init_default(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; struct device *dev = &adapter->pdev->dev; int rc, i; u32 val; rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); if (unlikely(rc)) { dev_err(dev, "Cannot init indirect table\n"); goto err_rss_init; } for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { val = ethtool_rxfh_indir_default(i, adapter->num_io_queues); rc = ena_com_indirect_table_fill_entry(ena_dev, i, ENA_IO_RXQ_IDX(val)); if (unlikely(rc)) { dev_err(dev, "Cannot fill indirect table\n"); goto err_fill_indir; } } rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE, 0xFFFFFFFF); if (unlikely(rc && (rc != -EOPNOTSUPP))) { dev_err(dev, "Cannot fill hash function\n"); goto err_fill_indir; } rc = ena_com_set_default_hash_ctrl(ena_dev); if (unlikely(rc && (rc != -EOPNOTSUPP))) { dev_err(dev, "Cannot fill hash control\n"); goto err_fill_indir; } return 0; err_fill_indir: ena_com_rss_destroy(ena_dev); err_rss_init: return rc; } static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; pci_release_selected_regions(pdev, release_bars); } /* ena_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ena_pci_tbl * * Returns 0 on success, negative on failure * * ena_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
*/ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_com_dev *ena_dev = NULL; struct ena_adapter *adapter; struct net_device *netdev; static int adapters_found; u32 max_num_io_queues; bool wd_state; int bars, rc; dev_dbg(&pdev->dev, "%s\n", __func__); rc = pci_enable_device_mem(pdev); if (rc) { dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); return rc; } rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS)); if (rc) { dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc); goto err_disable_device; } pci_set_master(pdev); ena_dev = vzalloc(sizeof(*ena_dev)); if (!ena_dev) { rc = -ENOMEM; goto err_disable_device; } bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); if (rc) { dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", rc); goto err_free_ena_dev; } ena_dev->reg_bar = devm_ioremap(&pdev->dev, pci_resource_start(pdev, ENA_REG_BAR), pci_resource_len(pdev, ENA_REG_BAR)); if (!ena_dev->reg_bar) { dev_err(&pdev->dev, "Failed to remap regs bar\n"); rc = -EFAULT; goto err_free_region; } ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; ena_dev->dmadev = &pdev->dev; netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS); if (!netdev) { dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); rc = -ENOMEM; goto err_free_region; } SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); adapter->ena_dev = ena_dev; adapter->netdev = netdev; adapter->pdev = pdev; adapter->msg_enable = DEFAULT_MSG_ENABLE; ena_dev->net_device = netdev; pci_set_drvdata(pdev, adapter); rc = ena_map_llq_mem_bar(pdev, ena_dev, bars); if (rc) { dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n"); goto err_netdev_destroy; } rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state); if (rc) { dev_err(&pdev->dev, "ENA device init failed\n"); if (rc == -ETIME) rc = -EPROBE_DEFER; goto err_netdev_destroy; } /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. 
* Updated during device initialization with the real granularity */ ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS; ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx); if (unlikely(!max_num_io_queues)) { rc = -EFAULT; goto err_device_destroy; } ena_set_conf_feat_params(adapter, &get_feat_ctx); adapter->reset_reason = ENA_REGS_RESET_NORMAL; adapter->num_io_queues = max_num_io_queues; adapter->max_num_io_queues = max_num_io_queues; adapter->last_monitored_tx_qid = 0; adapter->xdp_first_ring = 0; adapter->xdp_num_queues = 0; adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) adapter->disable_meta_caching = !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & BIT(ENA_ADMIN_DISABLE_META_CACHING)); adapter->wd_state = wd_state; snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); rc = ena_com_init_interrupt_moderation(adapter->ena_dev); if (rc) { dev_err(&pdev->dev, "Failed to query interrupt moderation feature\n"); goto err_device_destroy; } ena_init_io_rings(adapter, 0, adapter->xdp_num_queues + adapter->num_io_queues); netdev->netdev_ops = &ena_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT; ena_set_ethtool_ops(netdev); netdev->priv_flags |= IFF_UNICAST_FLT; u64_stats_init(&adapter->syncp); rc = ena_enable_msix_and_set_admin_interrupts(adapter); if (rc) { dev_err(&pdev->dev, "Failed to enable and set the admin interrupts\n"); goto err_worker_destroy; } rc = ena_rss_init_default(adapter); if (rc && (rc != -EOPNOTSUPP)) { dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); goto err_free_msix; } ena_config_debug_area(adapter); if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues)) netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); netif_carrier_off(netdev); rc = register_netdev(netdev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); goto err_rss; } INIT_WORK(&adapter->reset_task, ena_fw_reset_device); adapter->last_keep_alive_jiffies = jiffies; adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; adapter->missing_tx_completion_to = TX_TIMEOUT; adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS; ena_update_hints(adapter, &get_feat_ctx.hw_hints); timer_setup(&adapter->timer_service, ena_timer_service, 0); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM\n", DEVICE_NAME, (long)pci_resource_start(pdev, 0), netdev->dev_addr); set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); adapters_found++; return 0; err_rss: ena_com_delete_debug_area(ena_dev); ena_com_rss_destroy(ena_dev); err_free_msix: ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); /* stop submitting admin commands on a device that was reset */ ena_com_set_admin_running_state(ena_dev, false); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_worker_destroy: del_timer(&adapter->timer_service); err_device_destroy: ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); err_netdev_destroy: free_netdev(netdev); err_free_region: ena_release_bars(ena_dev, pdev); err_free_ena_dev: vfree(ena_dev); err_disable_device: pci_disable_device(pdev); return rc; } /*****************************************************************************/ /* __ena_shutoff 
- Helper used in both PCI remove/shutdown routines * @pdev: PCI device information struct * @shutdown: Is it a shutdown operation? If false, means it is a removal * * __ena_shutoff is a helper routine that does the real work on shutdown and * removal paths; the difference between those paths is with regards to whether * dettach or unregister the netdevice. */ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) { struct ena_adapter *adapter = pci_get_drvdata(pdev); struct ena_com_dev *ena_dev; struct net_device *netdev; ena_dev = adapter->ena_dev; netdev = adapter->netdev; #ifdef CONFIG_RFS_ACCEL if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { free_irq_cpu_rmap(netdev->rx_cpu_rmap); netdev->rx_cpu_rmap = NULL; } #endif /* CONFIG_RFS_ACCEL */ /* Make sure timer and reset routine won't be called after * freeing device resources. */ del_timer_sync(&adapter->timer_service); cancel_work_sync(&adapter->reset_task); rtnl_lock(); /* lock released inside the below if-else block */ adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN; ena_destroy_device(adapter, true); if (shutdown) { netif_device_detach(netdev); dev_close(netdev); rtnl_unlock(); } else { rtnl_unlock(); unregister_netdev(netdev); free_netdev(netdev); } ena_com_rss_destroy(ena_dev); ena_com_delete_debug_area(ena_dev); ena_com_delete_host_info(ena_dev); ena_release_bars(ena_dev, pdev); pci_disable_device(pdev); vfree(ena_dev); } /* ena_remove - Device Removal Routine * @pdev: PCI device information struct * * ena_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. */ static void ena_remove(struct pci_dev *pdev) { __ena_shutoff(pdev, false); } /* ena_shutdown - Device Shutdown Routine * @pdev: PCI device information struct * * ena_shutdown is called by the PCI subsystem to alert the driver that * a shutdown/reboot (or kexec) is happening and device must be disabled. 
*/ static void ena_shutdown(struct pci_dev *pdev) { __ena_shutoff(pdev, true); } /* ena_suspend - PM suspend callback * @dev_d: Device information struct */ static int __maybe_unused ena_suspend(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct ena_adapter *adapter = pci_get_drvdata(pdev); ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp); rtnl_lock(); if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { dev_err(&pdev->dev, "Ignoring device reset request as the device is being suspended\n"); clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } ena_destroy_device(adapter, true); rtnl_unlock(); return 0; } /* ena_resume - PM resume callback * @dev_d: Device information struct */ static int __maybe_unused ena_resume(struct device *dev_d) { struct ena_adapter *adapter = dev_get_drvdata(dev_d); int rc; ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp); rtnl_lock(); rc = ena_restore_device(adapter); rtnl_unlock(); return rc; } static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume); static struct pci_driver ena_pci_driver = { .name = DRV_MODULE_NAME, .id_table = ena_pci_tbl, .probe = ena_probe, .remove = ena_remove, .shutdown = ena_shutdown, .driver.pm = &ena_pm_ops, .sriov_configure = pci_sriov_configure_simple, }; static int __init ena_init(void) { int ret; ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); if (!ena_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; } ret = pci_register_driver(&ena_pci_driver); if (ret) destroy_workqueue(ena_wq); return ret; } static void __exit ena_cleanup(void) { pci_unregister_driver(&ena_pci_driver); if (ena_wq) { destroy_workqueue(ena_wq); ena_wq = NULL; } } /****************************************************************************** ******************************** AENQ Handlers ******************************* *****************************************************************************/ /* ena_update_on_link_change: * Notify the network interface about the change in link status */ static void ena_update_on_link_change(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_link_change_desc *aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; int status = aenq_desc->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; if (status) { netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); set_bit(ENA_FLAG_LINK_UP, &adapter->flags); if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags)) netif_carrier_on(adapter->netdev); } else { clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); netif_carrier_off(adapter->netdev); } } static void ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_keep_alive_desc *desc; u64 rx_drops; u64 tx_drops; desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; adapter->last_keep_alive_jiffies = jiffies; rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low; u64_stats_update_begin(&adapter->syncp); /* These stats are accumulated by the device, so the counters indicate * all drops since last reset. 
*/ adapter->dev_stats.rx_drops = rx_drops; adapter->dev_stats.tx_drops = tx_drops; u64_stats_update_end(&adapter->syncp); } static void ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_ena_hw_hints *hints; WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION); switch (aenq_e->aenq_common_desc.syndrome) { case ENA_ADMIN_UPDATE_HINTS: hints = (struct ena_admin_ena_hw_hints *) (&aenq_e->inline_data_w4); ena_update_hints(adapter, hints); break; default: netif_err(adapter, drv, adapter->netdev, "Invalid aenq notification link state %d\n", aenq_e->aenq_common_desc.syndrome); } } /* This handler will called for unknown event group or unimplemented handlers*/ static void unimplemented_aenq_handler(void *data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)data; netif_err(adapter, drv, adapter->netdev, "Unknown event was received or event with unimplemented handler\n"); } static struct ena_aenq_handlers aenq_handlers = { .handlers = { [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, [ENA_ADMIN_NOTIFICATION] = ena_notification, [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, }, .unimplemented_handler = unimplemented_aenq_handler }; module_init(ena_init); module_exit(ena_cleanup);
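/* Editor's note: a minimal sketch, not part of the driver, of how a dispatch
 * table like aenq_handlers above is typically consumed: look up the handler
 * registered for the event group and fall back to the unimplemented handler.
 * The helper name and the ENA_MAX_HANDLERS bound are assumptions used only
 * for illustration; the real dispatch lives in the ena_com layer.
 */
#if 0
static ena_aenq_handler example_get_aenq_handler(struct ena_aenq_handlers *handlers,
						 u16 group)
{
	if (group >= ENA_MAX_HANDLERS || !handlers->handlers[group])
		return handlers->unimplemented_handler;

	return handlers->handlers[group];
}
#endif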
linux-master
drivers/net/ethernet/amazon/ena/ena_netdev.c
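The watchdog checks in ena_timer_service() above (keep-alive, admin queue state, missing Tx completions) share one jiffies idiom: record a timestamp when activity is seen, add the configured timeout, and test the sum with time_is_before_jiffies(). A minimal self-contained sketch of that idiom follows; the helper name is made up for illustration and is not driver API.

#include <linux/jiffies.h>
#include <linux/types.h>

/* Illustration only: true once more than @timeout jiffies have elapsed
 * since @last_seen, the same test check_for_missing_keep_alive() performs.
 */
static inline bool example_watchdog_expired(unsigned long last_seen,
					    unsigned long timeout)
{
	return time_is_before_jiffies(last_seen + timeout);
}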
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved. */ #include <linux/ethtool.h> #include <linux/pci.h> #include "ena_netdev.h" struct ena_stats { char name[ETH_GSTRING_LEN]; int stat_offset; }; #define ENA_STAT_ENA_COM_ENTRY(stat) { \ .name = #stat, \ .stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \ } #define ENA_STAT_ENTRY(stat, stat_type) { \ .name = #stat, \ .stat_offset = offsetof(struct ena_stats_##stat_type, stat) / sizeof(u64) \ } #define ENA_STAT_HW_ENTRY(stat, stat_type) { \ .name = #stat, \ .stat_offset = offsetof(struct ena_admin_##stat_type, stat) / sizeof(u64) \ } #define ENA_STAT_RX_ENTRY(stat) \ ENA_STAT_ENTRY(stat, rx) #define ENA_STAT_TX_ENTRY(stat) \ ENA_STAT_ENTRY(stat, tx) #define ENA_STAT_GLOBAL_ENTRY(stat) \ ENA_STAT_ENTRY(stat, dev) #define ENA_STAT_ENI_ENTRY(stat) \ ENA_STAT_HW_ENTRY(stat, eni_stats) static const struct ena_stats ena_stats_global_strings[] = { ENA_STAT_GLOBAL_ENTRY(tx_timeout), ENA_STAT_GLOBAL_ENTRY(suspend), ENA_STAT_GLOBAL_ENTRY(resume), ENA_STAT_GLOBAL_ENTRY(wd_expired), ENA_STAT_GLOBAL_ENTRY(interface_up), ENA_STAT_GLOBAL_ENTRY(interface_down), ENA_STAT_GLOBAL_ENTRY(admin_q_pause), }; static const struct ena_stats ena_stats_eni_strings[] = { ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded), ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded), ENA_STAT_ENI_ENTRY(pps_allowance_exceeded), ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded), ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded), }; static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(cnt), ENA_STAT_TX_ENTRY(bytes), ENA_STAT_TX_ENTRY(queue_stop), ENA_STAT_TX_ENTRY(queue_wakeup), ENA_STAT_TX_ENTRY(dma_mapping_err), ENA_STAT_TX_ENTRY(linearize), ENA_STAT_TX_ENTRY(linearize_failed), ENA_STAT_TX_ENTRY(napi_comp), ENA_STAT_TX_ENTRY(tx_poll), ENA_STAT_TX_ENTRY(doorbells), ENA_STAT_TX_ENTRY(prepare_ctx_err), ENA_STAT_TX_ENTRY(bad_req_id), ENA_STAT_TX_ENTRY(llq_buffer_copy), ENA_STAT_TX_ENTRY(missed_tx), ENA_STAT_TX_ENTRY(unmask_interrupt), }; static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(cnt), ENA_STAT_RX_ENTRY(bytes), ENA_STAT_RX_ENTRY(rx_copybreak_pkt), ENA_STAT_RX_ENTRY(csum_good), ENA_STAT_RX_ENTRY(refil_partial), ENA_STAT_RX_ENTRY(csum_bad), ENA_STAT_RX_ENTRY(page_alloc_fail), ENA_STAT_RX_ENTRY(skb_alloc_fail), ENA_STAT_RX_ENTRY(dma_mapping_err), ENA_STAT_RX_ENTRY(bad_desc_num), ENA_STAT_RX_ENTRY(bad_req_id), ENA_STAT_RX_ENTRY(empty_rx_ring), ENA_STAT_RX_ENTRY(csum_unchecked), ENA_STAT_RX_ENTRY(xdp_aborted), ENA_STAT_RX_ENTRY(xdp_drop), ENA_STAT_RX_ENTRY(xdp_pass), ENA_STAT_RX_ENTRY(xdp_tx), ENA_STAT_RX_ENTRY(xdp_invalid), ENA_STAT_RX_ENTRY(xdp_redirect), }; static const struct ena_stats ena_stats_ena_com_strings[] = { ENA_STAT_ENA_COM_ENTRY(aborted_cmd), ENA_STAT_ENA_COM_ENTRY(submitted_cmd), ENA_STAT_ENA_COM_ENTRY(completed_cmd), ENA_STAT_ENA_COM_ENTRY(out_of_space), ENA_STAT_ENA_COM_ENTRY(no_completion), }; #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) #define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings) static void ena_safe_update_stat(u64 *src, u64 *dst, struct u64_stats_sync *syncp) { unsigned int start; do { start = u64_stats_fetch_begin(syncp); *(dst) = *src; } while (u64_stats_fetch_retry(syncp, start)); } 
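/* ena_safe_update_stat() above takes a consistent snapshot of a 64-bit
 * counter even on 32-bit kernels: u64_stats_fetch_begin()/_retry() repeat
 * the copy if a writer bumped the sequence counter mid-read.
 *
 * The per-queue walk below emits the Tx counters for every IO and XDP queue;
 * XDP queues have no Rx counterpart, so their Rx counters are skipped.
 */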
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data) { const struct ena_stats *ena_stats; struct ena_ring *ring; u64 *ptr; int i, j; for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { /* Tx stats */ ring = &adapter->tx_ring[i]; for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { ena_stats = &ena_stats_tx_strings[j]; ptr = (u64 *)&ring->tx_stats + ena_stats->stat_offset; ena_safe_update_stat(ptr, (*data)++, &ring->syncp); } /* XDP TX queues don't have a RX queue counterpart */ if (!ENA_IS_XDP_INDEX(adapter, i)) { /* Rx stats */ ring = &adapter->rx_ring[i]; for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { ena_stats = &ena_stats_rx_strings[j]; ptr = (u64 *)&ring->rx_stats + ena_stats->stat_offset; ena_safe_update_stat(ptr, (*data)++, &ring->syncp); } } } } static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data) { const struct ena_stats *ena_stats; u64 *ptr; int i; for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { ena_stats = &ena_stats_ena_com_strings[i]; ptr = (u64 *)&adapter->ena_dev->admin_queue.stats + ena_stats->stat_offset; *(*data)++ = *ptr; } } static void ena_get_stats(struct ena_adapter *adapter, u64 *data, bool eni_stats_needed) { const struct ena_stats *ena_stats; u64 *ptr; int i; for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { ena_stats = &ena_stats_global_strings[i]; ptr = (u64 *)&adapter->dev_stats + ena_stats->stat_offset; ena_safe_update_stat(ptr, data++, &adapter->syncp); } if (eni_stats_needed) { ena_update_hw_stats(adapter); for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) { ena_stats = &ena_stats_eni_strings[i]; ptr = (u64 *)&adapter->eni_stats + ena_stats->stat_offset; ena_safe_update_stat(ptr, data++, &adapter->syncp); } } ena_queue_stats(adapter, &data); ena_dev_admin_queue_stats(adapter, &data); } static void ena_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ena_adapter *adapter = netdev_priv(netdev); struct ena_com_dev *dev = adapter->ena_dev; ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)); } static int ena_get_sw_stats_count(struct ena_adapter *adapter) { return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + adapter->xdp_num_queues * ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; } static int ena_get_hw_stats_count(struct ena_adapter *adapter) { bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS); return ENA_STATS_ARRAY_ENI(adapter) * supported; } int ena_get_sset_count(struct net_device *netdev, int sset) { struct ena_adapter *adapter = netdev_priv(netdev); switch (sset) { case ETH_SS_STATS: return ena_get_sw_stats_count(adapter) + ena_get_hw_stats_count(adapter); } return -EOPNOTSUPP; } static void ena_queue_strings(struct ena_adapter *adapter, u8 **data) { const struct ena_stats *ena_stats; bool is_xdp; int i, j; for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { is_xdp = ENA_IS_XDP_INDEX(adapter, i); /* Tx stats */ for (j = 0; j < ENA_STATS_ARRAY_TX; j++) { ena_stats = &ena_stats_tx_strings[j]; ethtool_sprintf(data, "queue_%u_%s_%s", i, is_xdp ? 
"xdp_tx" : "tx", ena_stats->name); } if (!is_xdp) { /* RX stats, in XDP there isn't a RX queue * counterpart */ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) { ena_stats = &ena_stats_rx_strings[j]; ethtool_sprintf(data, "queue_%u_rx_%s", i, ena_stats->name); } } } } static void ena_com_dev_strings(u8 **data) { const struct ena_stats *ena_stats; int i; for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { ena_stats = &ena_stats_ena_com_strings[i]; ethtool_sprintf(data, "ena_admin_q_%s", ena_stats->name); } } static void ena_get_strings(struct ena_adapter *adapter, u8 *data, bool eni_stats_needed) { const struct ena_stats *ena_stats; int i; for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { ena_stats = &ena_stats_global_strings[i]; ethtool_sprintf(&data, ena_stats->name); } if (eni_stats_needed) { for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) { ena_stats = &ena_stats_eni_strings[i]; ethtool_sprintf(&data, ena_stats->name); } } ena_queue_strings(adapter, &data); ena_com_dev_strings(&data); } static void ena_get_ethtool_strings(struct net_device *netdev, u32 sset, u8 *data) { struct ena_adapter *adapter = netdev_priv(netdev); struct ena_com_dev *dev = adapter->ena_dev; switch (sset) { case ETH_SS_STATS: ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)); break; } } static int ena_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *link_ksettings) { struct ena_adapter *adapter = netdev_priv(netdev); struct ena_com_dev *ena_dev = adapter->ena_dev; struct ena_admin_get_feature_link_desc *link; struct ena_admin_get_feat_resp feat_resp; int rc; rc = ena_com_get_link_params(ena_dev, &feat_resp); if (rc) return rc; link = &feat_resp.u.link; link_ksettings->base.speed = link->speed; if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) { ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Autoneg); ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Autoneg); } link_ksettings->base.autoneg = (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; link_ksettings->base.duplex = DUPLEX_FULL; return 0; } static int ena_get_coalesce(struct net_device *net_dev, struct ethtool_coalesce *coalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; if (!ena_com_interrupt_moderation_supported(ena_dev)) return -EOPNOTSUPP; coalesce->tx_coalesce_usecs = ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) * ena_dev->intr_delay_resolution; coalesce->rx_coalesce_usecs = ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) * ena_dev->intr_delay_resolution; coalesce->use_adaptive_rx_coalesce = ena_com_get_adaptive_moderation_enabled(ena_dev); return 0; } static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) { unsigned int val; int i; val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev); for (i = 0; i < adapter->num_io_queues; i++) adapter->tx_ring[i].smoothed_interval = val; } static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) { unsigned int val; int i; val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev); for (i = 0; i < adapter->num_io_queues; i++) adapter->rx_ring[i].smoothed_interval = val; } static int ena_set_coalesce(struct net_device *net_dev, struct ethtool_coalesce *coalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; if (!ena_com_interrupt_moderation_supported(ena_dev)) return -EOPNOTSUPP; rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, coalesce->tx_coalesce_usecs); if (rc) return rc; ena_update_tx_rings_nonadaptive_intr_moderation(adapter); rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, coalesce->rx_coalesce_usecs); if (rc) return rc; ena_update_rx_rings_nonadaptive_intr_moderation(adapter); if (coalesce->use_adaptive_rx_coalesce && !ena_com_get_adaptive_moderation_enabled(ena_dev)) ena_com_enable_adaptive_moderation(ena_dev); if (!coalesce->use_adaptive_rx_coalesce && ena_com_get_adaptive_moderation_enabled(ena_dev)) ena_com_disable_adaptive_moderation(ena_dev); return 0; } static u32 ena_get_msglevel(struct net_device *netdev) { struct ena_adapter *adapter = netdev_priv(netdev); return adapter->msg_enable; } static void ena_set_msglevel(struct net_device *netdev, u32 value) { struct ena_adapter *adapter = netdev_priv(netdev); adapter->msg_enable = value; } static void ena_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct ena_adapter *adapter = netdev_priv(dev); strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strscpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); } static void ena_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(netdev); ring->tx_max_pending = adapter->max_tx_ring_size; ring->rx_max_pending = adapter->max_rx_ring_size; if (adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { bool large_llq_supported = adapter->large_llq_header_supported; kernel_ring->tx_push = true; kernel_ring->tx_push_buf_len = adapter->ena_dev->tx_max_header_size; if (large_llq_supported) kernel_ring->tx_push_buf_max_len = ENA_LLQ_LARGE_HEADER; else kernel_ring->tx_push_buf_max_len = 
ENA_LLQ_HEADER; } else { kernel_ring->tx_push = false; kernel_ring->tx_push_buf_max_len = 0; kernel_ring->tx_push_buf_len = 0; } ring->tx_pending = adapter->tx_ring[0].ring_size; ring->rx_pending = adapter->rx_ring[0].ring_size; } static int ena_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(netdev); u32 new_tx_size, new_rx_size, new_tx_push_buf_len; bool changed = false; new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ? ENA_MIN_RING_SIZE : ring->tx_pending; new_tx_size = rounddown_pow_of_two(new_tx_size); new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ? ENA_MIN_RING_SIZE : ring->rx_pending; new_rx_size = rounddown_pow_of_two(new_rx_size); changed |= new_tx_size != adapter->requested_tx_ring_size || new_rx_size != adapter->requested_rx_ring_size; /* This value is ignored if LLQ is not supported */ new_tx_push_buf_len = adapter->ena_dev->tx_max_header_size; if ((adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) != kernel_ring->tx_push) { NL_SET_ERR_MSG_MOD(extack, "Push mode state cannot be modified"); return -EINVAL; } /* Validate that the push buffer is supported on the underlying device */ if (kernel_ring->tx_push_buf_len) { enum ena_admin_placement_policy_type placement; new_tx_push_buf_len = kernel_ring->tx_push_buf_len; placement = adapter->ena_dev->tx_mem_queue_type; if (placement == ENA_ADMIN_PLACEMENT_POLICY_HOST) return -EOPNOTSUPP; if (new_tx_push_buf_len != ENA_LLQ_HEADER && new_tx_push_buf_len != ENA_LLQ_LARGE_HEADER) { bool large_llq_sup = adapter->large_llq_header_supported; char large_llq_size_str[40]; snprintf(large_llq_size_str, 40, ", %lu", ENA_LLQ_LARGE_HEADER); NL_SET_ERR_MSG_FMT_MOD(extack, "Supported tx push buff values: [%lu%s]", ENA_LLQ_HEADER, large_llq_sup ? 
large_llq_size_str : ""); return -EINVAL; } changed |= new_tx_push_buf_len != adapter->ena_dev->tx_max_header_size; } if (!changed) return 0; return ena_update_queue_params(adapter, new_tx_size, new_rx_size, new_tx_push_buf_len); } static u32 ena_flow_hash_to_flow_type(u16 hash_fields) { u32 data = 0; if (hash_fields & ENA_ADMIN_RSS_L2_DA) data |= RXH_L2DA; if (hash_fields & ENA_ADMIN_RSS_L3_DA) data |= RXH_IP_DST; if (hash_fields & ENA_ADMIN_RSS_L3_SA) data |= RXH_IP_SRC; if (hash_fields & ENA_ADMIN_RSS_L4_DP) data |= RXH_L4_B_2_3; if (hash_fields & ENA_ADMIN_RSS_L4_SP) data |= RXH_L4_B_0_1; return data; } static u16 ena_flow_data_to_flow_hash(u32 hash_fields) { u16 data = 0; if (hash_fields & RXH_L2DA) data |= ENA_ADMIN_RSS_L2_DA; if (hash_fields & RXH_IP_DST) data |= ENA_ADMIN_RSS_L3_DA; if (hash_fields & RXH_IP_SRC) data |= ENA_ADMIN_RSS_L3_SA; if (hash_fields & RXH_L4_B_2_3) data |= ENA_ADMIN_RSS_L4_DP; if (hash_fields & RXH_L4_B_0_1) data |= ENA_ADMIN_RSS_L4_SP; return data; } static int ena_get_rss_hash(struct ena_com_dev *ena_dev, struct ethtool_rxnfc *cmd) { enum ena_admin_flow_hash_proto proto; u16 hash_fields; int rc; cmd->data = 0; switch (cmd->flow_type) { case TCP_V4_FLOW: proto = ENA_ADMIN_RSS_TCP4; break; case UDP_V4_FLOW: proto = ENA_ADMIN_RSS_UDP4; break; case TCP_V6_FLOW: proto = ENA_ADMIN_RSS_TCP6; break; case UDP_V6_FLOW: proto = ENA_ADMIN_RSS_UDP6; break; case IPV4_FLOW: proto = ENA_ADMIN_RSS_IP4; break; case IPV6_FLOW: proto = ENA_ADMIN_RSS_IP6; break; case ETHER_FLOW: proto = ENA_ADMIN_RSS_NOT_IP; break; case AH_V4_FLOW: case ESP_V4_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: return -EOPNOTSUPP; default: return -EINVAL; } rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields); if (rc) return rc; cmd->data = ena_flow_hash_to_flow_type(hash_fields); return 0; } static int ena_set_rss_hash(struct ena_com_dev *ena_dev, struct ethtool_rxnfc *cmd) { enum ena_admin_flow_hash_proto proto; u16 hash_fields; switch (cmd->flow_type) { case TCP_V4_FLOW: proto = ENA_ADMIN_RSS_TCP4; break; case UDP_V4_FLOW: proto = ENA_ADMIN_RSS_UDP4; break; case TCP_V6_FLOW: proto = ENA_ADMIN_RSS_TCP6; break; case UDP_V6_FLOW: proto = ENA_ADMIN_RSS_UDP6; break; case IPV4_FLOW: proto = ENA_ADMIN_RSS_IP4; break; case IPV6_FLOW: proto = ENA_ADMIN_RSS_IP6; break; case ETHER_FLOW: proto = ENA_ADMIN_RSS_NOT_IP; break; case AH_V4_FLOW: case ESP_V4_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: return -EOPNOTSUPP; default: return -EINVAL; } hash_fields = ena_flow_data_to_flow_hash(cmd->data); return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields); } static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) { struct ena_adapter *adapter = netdev_priv(netdev); int rc = 0; switch (info->cmd) { case ETHTOOL_SRXFH: rc = ena_set_rss_hash(adapter->ena_dev, info); break; case ETHTOOL_SRXCLSRLDEL: case ETHTOOL_SRXCLSRLINS: default: netif_err(adapter, drv, netdev, "Command parameter %d is not supported\n", info->cmd); rc = -EOPNOTSUPP; } return rc; } static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) { struct ena_adapter *adapter = netdev_priv(netdev); int rc = 0; switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = adapter->num_io_queues; rc = 0; break; case ETHTOOL_GRXFH: rc = ena_get_rss_hash(adapter->ena_dev, info); break; case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: case ETHTOOL_GRXCLSRLALL: default: netif_err(adapter, drv, netdev, "Command parameter %d is not 
supported\n", info->cmd); rc = -EOPNOTSUPP; } return rc; } static u32 ena_get_rxfh_indir_size(struct net_device *netdev) { return ENA_RX_RSS_TABLE_SIZE; } static u32 ena_get_rxfh_key_size(struct net_device *netdev) { return ENA_HASH_KEY_SIZE; } static int ena_indirection_table_set(struct ena_adapter *adapter, const u32 *indir) { struct ena_com_dev *ena_dev = adapter->ena_dev; int i, rc; for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { rc = ena_com_indirect_table_fill_entry(ena_dev, i, ENA_IO_RXQ_IDX(indir[i])); if (unlikely(rc)) { netif_err(adapter, drv, adapter->netdev, "Cannot fill indirect table (index is too large)\n"); return rc; } } rc = ena_com_indirect_table_set(ena_dev); if (rc) { netif_err(adapter, drv, adapter->netdev, "Cannot set indirect table\n"); return rc == -EPERM ? -EOPNOTSUPP : rc; } return rc; } static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir) { struct ena_com_dev *ena_dev = adapter->ena_dev; int i, rc; if (!indir) return 0; rc = ena_com_indirect_table_get(ena_dev, indir); if (rc) return rc; /* Our internal representation of the indices is: even indices * for Tx and uneven indices for Rx. We need to convert the Rx * indices to be consecutive */ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]); return rc; } static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct ena_adapter *adapter = netdev_priv(netdev); enum ena_admin_hash_functions ena_func; u8 func; int rc; rc = ena_indirection_table_get(adapter, indir); if (rc) return rc; /* We call this function in order to check if the device * supports getting/setting the hash function. */ rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func); if (rc) { if (rc == -EOPNOTSUPP) rc = 0; return rc; } rc = ena_com_get_hash_key(adapter->ena_dev, key); if (rc) return rc; switch (ena_func) { case ENA_ADMIN_TOEPLITZ: func = ETH_RSS_HASH_TOP; break; case ENA_ADMIN_CRC32: func = ETH_RSS_HASH_CRC32; break; default: netif_err(adapter, drv, netdev, "Command parameter is not supported\n"); return -EOPNOTSUPP; } if (hfunc) *hfunc = func; return 0; } static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct ena_adapter *adapter = netdev_priv(netdev); struct ena_com_dev *ena_dev = adapter->ena_dev; enum ena_admin_hash_functions func = 0; int rc; if (indir) { rc = ena_indirection_table_set(adapter, indir); if (rc) return rc; } switch (hfunc) { case ETH_RSS_HASH_NO_CHANGE: func = ena_com_get_current_hash_function(ena_dev); break; case ETH_RSS_HASH_TOP: func = ENA_ADMIN_TOEPLITZ; break; case ETH_RSS_HASH_CRC32: func = ENA_ADMIN_CRC32; break; default: netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n", hfunc); return -EOPNOTSUPP; } if (key || func) { rc = ena_com_fill_hash_function(ena_dev, func, key, ENA_HASH_KEY_SIZE, 0xFFFFFFFF); if (unlikely(rc)) { netif_err(adapter, drv, netdev, "Cannot fill key\n"); return rc == -EPERM ? 
-EOPNOTSUPP : rc; } } return 0; } static void ena_get_channels(struct net_device *netdev, struct ethtool_channels *channels) { struct ena_adapter *adapter = netdev_priv(netdev); channels->max_combined = adapter->max_num_io_queues; channels->combined_count = adapter->num_io_queues; } static int ena_set_channels(struct net_device *netdev, struct ethtool_channels *channels) { struct ena_adapter *adapter = netdev_priv(netdev); u32 count = channels->combined_count; /* The check for max value is already done in ethtool */ if (count < ENA_MIN_NUM_IO_QUEUES) return -EINVAL; if (!ena_xdp_legal_queue_count(adapter, count)) { if (ena_xdp_present(adapter)) return -EINVAL; xdp_clear_features_flag(netdev); } else { xdp_set_features_flag(netdev, NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT); } return ena_update_queue_count(adapter, count); } static int ena_get_tunable(struct net_device *netdev, const struct ethtool_tunable *tuna, void *data) { struct ena_adapter *adapter = netdev_priv(netdev); int ret = 0; switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: *(u32 *)data = adapter->rx_copybreak; break; default: ret = -EINVAL; break; } return ret; } static int ena_set_tunable(struct net_device *netdev, const struct ethtool_tunable *tuna, const void *data) { struct ena_adapter *adapter = netdev_priv(netdev); int ret = 0; u32 len; switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: len = *(u32 *)data; ret = ena_set_rx_copybreak(adapter, len); break; default: ret = -EINVAL; break; } return ret; } static const struct ethtool_ops ena_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .supported_ring_params = ETHTOOL_RING_USE_TX_PUSH_BUF_LEN | ETHTOOL_RING_USE_TX_PUSH, .get_link_ksettings = ena_get_link_ksettings, .get_drvinfo = ena_get_drvinfo, .get_msglevel = ena_get_msglevel, .set_msglevel = ena_set_msglevel, .get_link = ethtool_op_get_link, .get_coalesce = ena_get_coalesce, .set_coalesce = ena_set_coalesce, .get_ringparam = ena_get_ringparam, .set_ringparam = ena_set_ringparam, .get_sset_count = ena_get_sset_count, .get_strings = ena_get_ethtool_strings, .get_ethtool_stats = ena_get_ethtool_stats, .get_rxnfc = ena_get_rxnfc, .set_rxnfc = ena_set_rxnfc, .get_rxfh_indir_size = ena_get_rxfh_indir_size, .get_rxfh_key_size = ena_get_rxfh_key_size, .get_rxfh = ena_get_rxfh, .set_rxfh = ena_set_rxfh, .get_channels = ena_get_channels, .set_channels = ena_set_channels, .get_tunable = ena_get_tunable, .set_tunable = ena_set_tunable, .get_ts_info = ethtool_op_get_ts_info, }; void ena_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &ena_ethtool_ops; } static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf) { struct net_device *netdev = adapter->netdev; u8 *strings_buf; u64 *data_buf; int strings_num; int i, rc; strings_num = ena_get_sw_stats_count(adapter); if (strings_num <= 0) { netif_err(adapter, drv, netdev, "Can't get stats num\n"); return; } strings_buf = devm_kcalloc(&adapter->pdev->dev, ETH_GSTRING_LEN, strings_num, GFP_ATOMIC); if (!strings_buf) { netif_err(adapter, drv, netdev, "Failed to allocate strings_buf\n"); return; } data_buf = devm_kcalloc(&adapter->pdev->dev, strings_num, sizeof(u64), GFP_ATOMIC); if (!data_buf) { netif_err(adapter, drv, netdev, "Failed to allocate data buf\n"); devm_kfree(&adapter->pdev->dev, strings_buf); return; } ena_get_strings(adapter, strings_buf, false); ena_get_stats(adapter, data_buf, false); /* If there is a buffer, dump stats, otherwise print them to dmesg */ if (buf) for (i = 0; i < strings_num; 
i++) { rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64), "%s %llu\n", strings_buf + i * ETH_GSTRING_LEN, data_buf[i]); buf += rc; } else for (i = 0; i < strings_num; i++) netif_err(adapter, drv, netdev, "%s: %llu\n", strings_buf + i * ETH_GSTRING_LEN, data_buf[i]); devm_kfree(&adapter->pdev->dev, strings_buf); devm_kfree(&adapter->pdev->dev, data_buf); } void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf) { if (!buf) return; ena_dump_stats_ex(adapter, buf); } void ena_dump_stats_to_dmesg(struct ena_adapter *adapter) { ena_dump_stats_ex(adapter, NULL); }
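/* Editor's note on the queue-index convention relied on by
 * ena_indirection_table_get() above: combined channel q maps to hardware
 * Tx index 2 * q and hardware Rx index 2 * q + 1 (assuming the usual
 * ENA_IO_TXQ_IDX()/ENA_IO_RXQ_IDX() definitions), so the conversion back
 * to a combined index is (rx_idx - 1) / 2. Worked example:
 *
 *	combined 0 -> hw Rx 1 -> (1 - 1) / 2 == 0
 *	combined 3 -> hw Rx 7 -> (7 - 1) / 2 == 3
 */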
linux-master
drivers/net/ethernet/amazon/ena/ena_ethtool.c
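ena_set_ringparam() above normalizes the requested ring sizes before applying them: anything below ENA_MIN_RING_SIZE is raised to the minimum, and the result is rounded down to a power of two. A short sketch of that normalization follows; the helper name and the literal 256 minimum are illustrative assumptions, the driver itself uses ENA_MIN_RING_SIZE.

#include <linux/log2.h>
#include <linux/types.h>

/* Illustration only: normalize a requested ring size the way
 * ena_set_ringparam() does (assumed minimum of 256 entries).
 */
static u32 example_normalize_ring_size(u32 requested)
{
	u32 size = requested < 256 ? 256 : requested;

	return rounddown_pow_of_two(size);	/* e.g. 1000 -> 512, 100 -> 256 */
}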
// SPDX-License-Identifier: GPL-2.0-or-later /* * meth.c -- O2 Builtin 10/100 Ethernet driver * * Copyright (C) 2001-2003 Ilya Volynets */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/device.h> /* struct device, et al */ #include <linux/netdevice.h> /* struct device, and other headers */ #include <linux/etherdevice.h> /* eth_type_trans */ #include <linux/ip.h> /* struct iphdr */ #include <linux/tcp.h> /* struct tcphdr */ #include <linux/skbuff.h> #include <linux/mii.h> /* MII definitions */ #include <linux/crc32.h> #include <asm/ip32/mace.h> #include <asm/ip32/ip32_ints.h> #include <asm/io.h> #include "meth.h" #ifndef MFE_DEBUG #define MFE_DEBUG 0 #endif #if MFE_DEBUG>=1 #define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args) #define MFE_RX_DEBUG 2 #else #define DPRINTK(str,args...) #define MFE_RX_DEBUG 0 #endif static const char *meth_str="SGI O2 Fast Ethernet"; /* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */ #define TX_TIMEOUT (400*HZ/1000) static int timeout = TX_TIMEOUT; module_param(timeout, int, 0); /* * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC. */ #define METH_MCF_LIMIT 32 /* * This structure is private to each device. It is used to pass * packets in and out, so there is place for a packet */ struct meth_private { struct platform_device *pdev; /* in-memory copy of MAC Control register */ u64 mac_ctrl; /* in-memory copy of DMA Control register */ unsigned long dma_ctrl; /* address of PHY, used by mdio_* functions, initialized in mdio_probe */ unsigned long phy_addr; tx_packet *tx_ring; dma_addr_t tx_ring_dma; struct sk_buff *tx_skbs[TX_RING_ENTRIES]; dma_addr_t tx_skb_dmas[TX_RING_ENTRIES]; unsigned long tx_read, tx_write, tx_count; rx_packet *rx_ring[RX_RING_ENTRIES]; dma_addr_t rx_ring_dmas[RX_RING_ENTRIES]; struct sk_buff *rx_skbs[RX_RING_ENTRIES]; unsigned long rx_write; /* Multicast filter. 
*/ u64 mcast_filter; spinlock_t meth_lock; }; static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue); static irqreturn_t meth_interrupt(int irq, void *dev_id); /* global, initialized in ip32-setup.c */ char o2meth_eaddr[8]={0,0,0,0,0,0,0,0}; static inline void load_eaddr(struct net_device *dev) { int i; u64 macaddr; DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr); macaddr = 0; for (i = 0; i < 6; i++) macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); mace->eth.mac_addr = macaddr; } /* * Waits for BUSY status of mdio bus to clear */ #define WAIT_FOR_PHY(___rval) \ while ((___rval = mace->eth.phy_data) & MDIO_BUSY) { \ udelay(25); \ } /*read phy register, return value read */ static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg) { unsigned long rval; WAIT_FOR_PHY(rval); mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f); udelay(25); mace->eth.phy_trans_go = 1; udelay(25); WAIT_FOR_PHY(rval); return rval & MDIO_DATA_MASK; } static int mdio_probe(struct meth_private *priv) { int i; unsigned long p2, p3, flags; /* check if phy is detected already */ if(priv->phy_addr>=0&&priv->phy_addr<32) return 0; spin_lock_irqsave(&priv->meth_lock, flags); for (i=0;i<32;++i){ priv->phy_addr=i; p2=mdio_read(priv,2); p3=mdio_read(priv,3); #if MFE_DEBUG>=2 switch ((p2<<12)|(p3>>4)){ case PHY_QS6612X: DPRINTK("PHY is QS6612X\n"); break; case PHY_ICS1889: DPRINTK("PHY is ICS1889\n"); break; case PHY_ICS1890: DPRINTK("PHY is ICS1890\n"); break; case PHY_DP83840: DPRINTK("PHY is DP83840\n"); break; } #endif if(p2!=0xffff&&p2!=0x0000){ DPRINTK("PHY code: %x\n",(p2<<12)|(p3>>4)); break; } } spin_unlock_irqrestore(&priv->meth_lock, flags); if(priv->phy_addr<32) { return 0; } DPRINTK("Oopsie! PHY is not known!\n"); priv->phy_addr=-1; return -ENODEV; } static void meth_check_link(struct net_device *dev) { struct meth_private *priv = netdev_priv(dev); unsigned long mii_advertising = mdio_read(priv, 4); unsigned long mii_partner = mdio_read(priv, 5); unsigned long negotiated = mii_advertising & mii_partner; unsigned long duplex, speed; if (mii_partner == 0xffff) return; speed = (negotiated & 0x0380) ? METH_100MBIT : 0; duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ? METH_PHY_FDX : 0; if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) { DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half"); if (duplex) priv->mac_ctrl |= METH_PHY_FDX; else priv->mac_ctrl &= ~METH_PHY_FDX; mace->eth.mac_ctrl = priv->mac_ctrl; } if ((priv->mac_ctrl & METH_100MBIT) ^ speed) { DPRINTK("Setting %dMbs mode\n", speed ? 
100 : 10); if (duplex) priv->mac_ctrl |= METH_100MBIT; else priv->mac_ctrl &= ~METH_100MBIT; mace->eth.mac_ctrl = priv->mac_ctrl; } } static int meth_init_tx_ring(struct meth_private *priv) { /* Init TX ring */ priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, &priv->tx_ring_dma, GFP_ATOMIC); if (!priv->tx_ring) return -ENOMEM; priv->tx_count = priv->tx_read = priv->tx_write = 0; mace->eth.tx_ring_base = priv->tx_ring_dma; /* Now init skb save area */ memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs)); memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas)); return 0; } static int meth_init_rx_ring(struct meth_private *priv) { int i; for (i = 0; i < RX_RING_ENTRIES; i++) { priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0); /* 8byte status vector + 3quad padding + 2byte padding, * to put data on 64bit aligned boundary */ skb_reserve(priv->rx_skbs[i],METH_RX_HEAD); priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head); /* I'll need to re-sync it after each RX */ priv->rx_ring_dmas[i] = dma_map_single(&priv->pdev->dev, priv->rx_ring[i], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); mace->eth.rx_fifo = priv->rx_ring_dmas[i]; } priv->rx_write = 0; return 0; } static void meth_free_tx_ring(struct meth_private *priv) { int i; /* Remove any pending skb */ for (i = 0; i < TX_RING_ENTRIES; i++) { dev_kfree_skb(priv->tx_skbs[i]); priv->tx_skbs[i] = NULL; } dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring, priv->tx_ring_dma); } /* Presumes RX DMA engine is stopped, and RX fifo ring is reset */ static void meth_free_rx_ring(struct meth_private *priv) { int i; for (i = 0; i < RX_RING_ENTRIES; i++) { dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[i], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); priv->rx_ring[i] = 0; priv->rx_ring_dmas[i] = 0; kfree_skb(priv->rx_skbs[i]); } } int meth_reset(struct net_device *dev) { struct meth_private *priv = netdev_priv(dev); /* Reset card */ mace->eth.mac_ctrl = SGI_MAC_RESET; udelay(1); mace->eth.mac_ctrl = 0; udelay(25); /* Load ethernet address */ load_eaddr(dev); /* Should load some "errata", but later */ /* Check for device */ if (mdio_probe(priv) < 0) { DPRINTK("Unable to find PHY\n"); return -ENODEV; } /* Initial mode: 10 | Half-duplex | Accept normal packets */ priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG; if (dev->flags & IFF_PROMISC) priv->mac_ctrl |= METH_PROMISC; mace->eth.mac_ctrl = priv->mac_ctrl; /* Autonegotiate speed and duplex mode */ meth_check_link(dev); /* Now set dma control, but don't enable DMA, yet */ priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) | (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT); mace->eth.dma_ctrl = priv->dma_ctrl; return 0; } /*============End Helper Routines=====================*/ /* * Open and close */ static int meth_open(struct net_device *dev) { struct meth_private *priv = netdev_priv(dev); int ret; priv->phy_addr = -1; /* No PHY is known yet... 
*/ /* Initialize the hardware */ ret = meth_reset(dev); if (ret < 0) return ret; /* Allocate the ring buffers */ ret = meth_init_tx_ring(priv); if (ret < 0) return ret; ret = meth_init_rx_ring(priv); if (ret < 0) goto out_free_tx_ring; ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev); if (ret) { printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq); goto out_free_rx_ring; } /* Start DMA */ priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/ METH_DMA_RX_EN | METH_DMA_RX_INT_EN; mace->eth.dma_ctrl = priv->dma_ctrl; DPRINTK("About to start queue\n"); netif_start_queue(dev); return 0; out_free_rx_ring: meth_free_rx_ring(priv); out_free_tx_ring: meth_free_tx_ring(priv); return ret; } static int meth_release(struct net_device *dev) { struct meth_private *priv = netdev_priv(dev); DPRINTK("Stopping queue\n"); netif_stop_queue(dev); /* can't transmit any more */ /* shut down DMA */ priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN); mace->eth.dma_ctrl = priv->dma_ctrl; free_irq(dev->irq, dev); meth_free_tx_ring(priv); meth_free_rx_ring(priv); return 0; } /* * Receive a packet: retrieve, encapsulate and pass over to upper levels */ static void meth_rx(struct net_device* dev, unsigned long int_status) { struct sk_buff *skb; unsigned long status, flags; struct meth_private *priv = netdev_priv(dev); unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8; spin_lock_irqsave(&priv->meth_lock, flags); priv->dma_ctrl &= ~METH_DMA_RX_INT_EN; mace->eth.dma_ctrl = priv->dma_ctrl; spin_unlock_irqrestore(&priv->meth_lock, flags); if (int_status & METH_INT_RX_UNDERFLOW) { fifo_rptr = (fifo_rptr - 1) & 0x0f; } while (priv->rx_write != fifo_rptr) { dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[priv->rx_write], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); status = priv->rx_ring[priv->rx_write]->status.raw; #if MFE_DEBUG if (!(status & METH_RX_ST_VALID)) { DPRINTK("Not received? status=%016lx\n",status); } #endif if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) { int len = (status & 0xffff) - 4; /* omit CRC */ /* length sanity check */ if (len < 60 || len > 1518) { printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2Lx.\n", dev->name, priv->rx_write, priv->rx_ring[priv->rx_write]->status.raw); dev->stats.rx_errors++; dev->stats.rx_length_errors++; skb = priv->rx_skbs[priv->rx_write]; } else { skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC); if (!skb) { /* Ouch! No memory! 
Drop packet on the floor */ DPRINTK("No mem: dropping packet\n"); dev->stats.rx_dropped++; skb = priv->rx_skbs[priv->rx_write]; } else { struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write]; /* 8byte status vector + 3quad padding + 2byte padding, * to put data on 64bit aligned boundary */ skb_reserve(skb, METH_RX_HEAD); /* Write metadata, and then pass to the receive level */ skb_put(skb_c, len); priv->rx_skbs[priv->rx_write] = skb; skb_c->protocol = eth_type_trans(skb_c, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += len; netif_rx(skb_c); } } } else { dev->stats.rx_errors++; skb=priv->rx_skbs[priv->rx_write]; #if MFE_DEBUG>0 printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status); if(status&METH_RX_ST_RCV_CODE_VIOLATION) printk(KERN_WARNING "Receive Code Violation\n"); if(status&METH_RX_ST_CRC_ERR) printk(KERN_WARNING "CRC error\n"); if(status&METH_RX_ST_INV_PREAMBLE_CTX) printk(KERN_WARNING "Invalid Preamble Context\n"); if(status&METH_RX_ST_LONG_EVT_SEEN) printk(KERN_WARNING "Long Event Seen...\n"); if(status&METH_RX_ST_BAD_PACKET) printk(KERN_WARNING "Bad Packet\n"); if(status&METH_RX_ST_CARRIER_EVT_SEEN) printk(KERN_WARNING "Carrier Event Seen\n"); #endif } priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head; priv->rx_ring[priv->rx_write]->status.raw = 0; priv->rx_ring_dmas[priv->rx_write] = dma_map_single(&priv->pdev->dev, priv->rx_ring[priv->rx_write], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write]; ADVANCE_RX_PTR(priv->rx_write); } spin_lock_irqsave(&priv->meth_lock, flags); /* In case there was underflow, and Rx DMA was disabled */ priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN; mace->eth.dma_ctrl = priv->dma_ctrl; mace->eth.int_stat = METH_INT_RX_THRESHOLD; spin_unlock_irqrestore(&priv->meth_lock, flags); } static int meth_tx_full(struct net_device *dev) { struct meth_private *priv = netdev_priv(dev); return priv->tx_count >= TX_RING_ENTRIES - 1; } static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) { struct meth_private *priv = netdev_priv(dev); unsigned long status, flags; struct sk_buff *skb; unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16; spin_lock_irqsave(&priv->meth_lock, flags); /* Stop DMA notification */ priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); mace->eth.dma_ctrl = priv->dma_ctrl; while (priv->tx_read != rptr) { skb = priv->tx_skbs[priv->tx_read]; status = priv->tx_ring[priv->tx_read].header.raw; #if MFE_DEBUG>=1 if (priv->tx_read == priv->tx_write) DPRINTK("Auchi! 
tx_read=%d,tx_write=%d,rptr=%d?\n", priv->tx_read, priv->tx_write,rptr); #endif if (status & METH_TX_ST_DONE) { if (status & METH_TX_ST_SUCCESS){ dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } else { dev->stats.tx_errors++; #if MFE_DEBUG>=1 DPRINTK("TX error: status=%016lx <",status); if(status & METH_TX_ST_SUCCESS) printk(" SUCCESS"); if(status & METH_TX_ST_TOOLONG) printk(" TOOLONG"); if(status & METH_TX_ST_UNDERRUN) printk(" UNDERRUN"); if(status & METH_TX_ST_EXCCOLL) printk(" EXCCOLL"); if(status & METH_TX_ST_DEFER) printk(" DEFER"); if(status & METH_TX_ST_LATECOLL) printk(" LATECOLL"); printk(" >\n"); #endif } } else { DPRINTK("RPTR points us here, but packet not done?\n"); break; } dev_consume_skb_irq(skb); priv->tx_skbs[priv->tx_read] = NULL; priv->tx_ring[priv->tx_read].header.raw = 0; priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1); priv->tx_count--; } /* wake up queue if it was stopped */ if (netif_queue_stopped(dev) && !meth_tx_full(dev)) { netif_wake_queue(dev); } mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT; spin_unlock_irqrestore(&priv->meth_lock, flags); } static void meth_error(struct net_device* dev, unsigned status) { struct meth_private *priv = netdev_priv(dev); unsigned long flags; printk(KERN_WARNING "meth: error status: 0x%08x\n",status); /* check for errors too... */ if (status & (METH_INT_TX_LINK_FAIL)) printk(KERN_WARNING "meth: link failure\n"); /* Should I do full reset in this case? */ if (status & (METH_INT_MEM_ERROR)) printk(KERN_WARNING "meth: memory error\n"); if (status & (METH_INT_TX_ABORT)) printk(KERN_WARNING "meth: aborted\n"); if (status & (METH_INT_RX_OVERFLOW)) printk(KERN_WARNING "meth: Rx overflow\n"); if (status & (METH_INT_RX_UNDERFLOW)) { printk(KERN_WARNING "meth: Rx underflow\n"); spin_lock_irqsave(&priv->meth_lock, flags); mace->eth.int_stat = METH_INT_RX_UNDERFLOW; /* more underflow interrupts will be delivered, * effectively throwing us into an infinite loop. * Thus I stop processing Rx in this case. */ priv->dma_ctrl &= ~METH_DMA_RX_EN; mace->eth.dma_ctrl = priv->dma_ctrl; DPRINTK("Disabled meth Rx DMA temporarily\n"); spin_unlock_irqrestore(&priv->meth_lock, flags); } mace->eth.int_stat = METH_INT_ERROR; } /* * The typical interrupt entry point */ static irqreturn_t meth_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct meth_private *priv = netdev_priv(dev); unsigned long status; status = mace->eth.int_stat; while (status & 0xff) { /* First handle errors - if we get Rx underflow, * Rx DMA will be disabled, and Rx handler will reenable * it. I don't think it's possible to get Rx underflow, * without getting Rx interrupt */ if (status & METH_INT_ERROR) { meth_error(dev, status); } if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) { /* a transmission is over: free the skb */ meth_tx_cleanup(dev, status); } if (status & METH_INT_RX_THRESHOLD) { if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN)) break; /* send it to meth_rx for handling */ meth_rx(dev, status); } status = mace->eth.int_stat; } return IRQ_HANDLED; } /* * Transmits packets that fit into TX descriptor (are <=120B) */ static void meth_tx_short_prepare(struct meth_private *priv, struct sk_buff *skb) { tx_packet *desc = &priv->tx_ring[priv->tx_write]; int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); /* maybe I should set whole thing to 0 first... 
*/ skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len); if (skb->len < len) memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); } #define TX_CATBUF1 BIT(25) static void meth_tx_1page_prepare(struct meth_private *priv, struct sk_buff *skb) { tx_packet *desc = &priv->tx_ring[priv->tx_write]; void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7); int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data); int buffer_len = skb->len - unaligned_len; dma_addr_t catbuf; desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1); /* unaligned part */ if (unaligned_len) { skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), unaligned_len); desc->header.raw |= (128 - unaligned_len) << 16; } /* first page */ catbuf = dma_map_single(&priv->pdev->dev, buffer_data, buffer_len, DMA_TO_DEVICE); desc->data.cat_buf[0].form.start_addr = catbuf >> 3; desc->data.cat_buf[0].form.len = buffer_len - 1; } #define TX_CATBUF2 BIT(26) static void meth_tx_2page_prepare(struct meth_private *priv, struct sk_buff *skb) { tx_packet *desc = &priv->tx_ring[priv->tx_write]; void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7); void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data); int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data); int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data); int buffer2_len = skb->len - buffer1_len - unaligned_len; dma_addr_t catbuf1, catbuf2; desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); /* unaligned part */ if (unaligned_len){ skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), unaligned_len); desc->header.raw |= (128 - unaligned_len) << 16; } /* first page */ catbuf1 = dma_map_single(&priv->pdev->dev, buffer1_data, buffer1_len, DMA_TO_DEVICE); desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3; desc->data.cat_buf[0].form.len = buffer1_len - 1; /* second page */ catbuf2 = dma_map_single(&priv->pdev->dev, buffer2_data, buffer2_len, DMA_TO_DEVICE); desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3; desc->data.cat_buf[1].form.len = buffer2_len - 1; } static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb) { /* Remember the skb, so we can free it at interrupt time */ priv->tx_skbs[priv->tx_write] = skb; if (skb->len <= 120) { /* Whole packet fits into descriptor */ meth_tx_short_prepare(priv, skb); } else if (PAGE_ALIGN((unsigned long)skb->data) != PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) { /* Packet crosses page boundary */ meth_tx_2page_prepare(priv, skb); } else { /* Packet is in one page */ meth_tx_1page_prepare(priv, skb); } priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1); mace->eth.tx_info = priv->tx_write; priv->tx_count++; } /* * Transmit a packet (called by the kernel) */ static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev) { struct meth_private *priv = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&priv->meth_lock, flags); /* Stop DMA notification */ priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); mace->eth.dma_ctrl = priv->dma_ctrl; meth_add_to_tx_ring(priv, skb); netif_trans_update(dev); /* save the timestamp */ /* If TX ring is full, tell the upper layer to stop sending packets */ if (meth_tx_full(dev)) { printk(KERN_DEBUG "TX full: stopping\n"); netif_stop_queue(dev); } /* Restart DMA notification */ priv->dma_ctrl |= METH_DMA_TX_INT_EN; mace->eth.dma_ctrl = priv->dma_ctrl; 
spin_unlock_irqrestore(&priv->meth_lock, flags); return NETDEV_TX_OK; } /* * Deal with a transmit timeout. */ static void meth_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct meth_private *priv = netdev_priv(dev); unsigned long flags; printk(KERN_WARNING "%s: transmit timed out\n", dev->name); /* Protect against concurrent rx interrupts */ spin_lock_irqsave(&priv->meth_lock,flags); /* Try to reset the interface. */ meth_reset(dev); dev->stats.tx_errors++; /* Clear all rings */ meth_free_tx_ring(priv); meth_free_rx_ring(priv); meth_init_tx_ring(priv); meth_init_rx_ring(priv); /* Restart dma */ priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN; mace->eth.dma_ctrl = priv->dma_ctrl; /* Enable interrupt */ spin_unlock_irqrestore(&priv->meth_lock, flags); netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } /* * Ioctl commands */ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { /* XXX Not yet implemented */ switch(cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: default: return -EOPNOTSUPP; } } static void meth_set_rx_mode(struct net_device *dev) { struct meth_private *priv = netdev_priv(dev); unsigned long flags; netif_stop_queue(dev); spin_lock_irqsave(&priv->meth_lock, flags); priv->mac_ctrl &= ~METH_PROMISC; if (dev->flags & IFF_PROMISC) { priv->mac_ctrl |= METH_PROMISC; priv->mcast_filter = 0xffffffffffffffffUL; } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) || (dev->flags & IFF_ALLMULTI)) { priv->mac_ctrl |= METH_ACCEPT_AMCAST; priv->mcast_filter = 0xffffffffffffffffUL; } else { struct netdev_hw_addr *ha; priv->mac_ctrl |= METH_ACCEPT_MCAST; netdev_for_each_mc_addr(ha, dev) set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26), (volatile unsigned long *)&priv->mcast_filter); } /* Write the changes to the chip registers. */ mace->eth.mac_ctrl = priv->mac_ctrl; mace->eth.mcast_filter = priv->mcast_filter; /* Done! */ spin_unlock_irqrestore(&priv->meth_lock, flags); netif_wake_queue(dev); } static const struct net_device_ops meth_netdev_ops = { .ndo_open = meth_open, .ndo_stop = meth_release, .ndo_start_xmit = meth_tx, .ndo_eth_ioctl = meth_ioctl, .ndo_tx_timeout = meth_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_set_rx_mode = meth_set_rx_mode, }; /* * The init function. */ static int meth_probe(struct platform_device *pdev) { struct net_device *dev; struct meth_private *priv; int err; dev = alloc_etherdev(sizeof(struct meth_private)); if (!dev) return -ENOMEM; dev->netdev_ops = &meth_netdev_ops; dev->watchdog_timeo = timeout; dev->irq = MACE_ETHERNET_IRQ; dev->base_addr = (unsigned long)&mace->eth; eth_hw_addr_set(dev, o2meth_eaddr); priv = netdev_priv(dev); priv->pdev = pdev; spin_lock_init(&priv->meth_lock); SET_NETDEV_DEV(dev, &pdev->dev); err = register_netdev(dev); if (err) { free_netdev(dev); return err; } printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n", dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29)); return 0; } static int meth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); free_netdev(dev); return 0; } static struct platform_driver meth_driver = { .probe = meth_probe, .remove = meth_remove, .driver = { .name = "meth", } }; module_platform_driver(meth_driver); MODULE_AUTHOR("Ilya Volynets <[email protected]>"); MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:meth");
linux-master
drivers/net/ethernet/sgi/meth.c
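/*
 * Illustrative sketch, not part of the driver above: the meth TX path relies
 * on TX_RING_ENTRIES being a power of two so ring indices can wrap with a
 * bitwise AND instead of a modulo, and meth_tx_full() declares the ring full
 * one slot early so the producer never catches up with the consumer.  The
 * small standalone userspace model below uses hypothetical names
 * (ring_demo, RING_ENTRIES, ring_produce, ring_consume) to show the same
 * index arithmetic in isolation; it is a sketch of the idea, not driver code.
 */
#include <assert.h>
#include <stdio.h>

#define RING_ENTRIES 128			/* must be a power of two */

struct ring_demo {
	unsigned int read;			/* consumer index */
	unsigned int write;			/* producer index */
	unsigned int count;			/* entries currently queued */
};

static int ring_full(const struct ring_demo *r)
{
	/* Stop one short so write never overtakes read, as meth_tx_full() does. */
	return r->count >= RING_ENTRIES - 1;
}

static void ring_produce(struct ring_demo *r)
{
	assert(!ring_full(r));
	r->write = (r->write + 1) & (RING_ENTRIES - 1);	/* wrap without '%' */
	r->count++;
}

static void ring_consume(struct ring_demo *r)
{
	assert(r->count > 0);
	r->read = (r->read + 1) & (RING_ENTRIES - 1);
	r->count--;
}

int main(void)
{
	struct ring_demo r = { 0, 0, 0 };
	int i;

	for (i = 0; i < RING_ENTRIES - 1; i++)
		ring_produce(&r);
	printf("full=%d write=%u\n", ring_full(&r), r.write);	/* full=1 write=127 */
	ring_consume(&r);
	printf("full=%d read=%u\n", ring_full(&r), r.read);	/* full=0 read=1 */
	return 0;
}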
// SPDX-License-Identifier: GPL-2.0 /* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. * * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc. * * References: * o IOC3 ASIC specification 4.51, 1996-04-18 * o IEEE 802.3 specification, 2000 edition * o DP38840A Specification, National Semiconductor, March 1997 * * To do: * * o Use prefetching for large packets. What is a good lower limit for * prefetching? * o Use hardware checksums. * o Which PHYs might possibly be attached to the IOC3 in real live, * which workarounds are required for them? Do we ever have Lucent's? * o For the 2.5 branch kill the mii-tool ioctls. */ #define IOC3_NAME "ioc3-eth" #define IOC3_VERSION "2.6.3-4" #include <linux/delay.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/init.h> #include <linux/crc16.h> #include <linux/crc32.h> #include <linux/mii.h> #include <linux/in.h> #include <linux/io.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/gfp.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/nvmem-consumer.h> #include <net/ip.h> #include <asm/sn/ioc3.h> #include <asm/pci/bridge.h> #define CRC16_INIT 0 #define CRC16_VALID 0xb001 /* Number of RX buffers. This is tunable in the range of 16 <= x < 512. * The value must be a power of two. */ #define RX_BUFFS 64 #define RX_RING_ENTRIES 512 /* fixed in hardware */ #define RX_RING_MASK (RX_RING_ENTRIES - 1) #define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64)) /* 128 TX buffers (not tunable) */ #define TX_RING_ENTRIES 128 #define TX_RING_MASK (TX_RING_ENTRIES - 1) #define TX_RING_SIZE (TX_RING_ENTRIES * sizeof(struct ioc3_etxd)) /* IOC3 does dma transfers in 128 byte blocks */ #define IOC3_DMA_XFER_LEN 128UL /* Every RX buffer starts with 8 byte descriptor data */ #define RX_OFFSET (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN) #define RX_BUF_SIZE (13 * IOC3_DMA_XFER_LEN) #define ETCSR_FD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21) #define ETCSR_HD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21) /* Private per NIC data of the driver. 
*/ struct ioc3_private { struct ioc3_ethregs *regs; struct device *dma_dev; u32 *ssram; unsigned long *rxr; /* pointer to receiver ring */ void *tx_ring; struct ioc3_etxd *txr; dma_addr_t rxr_dma; dma_addr_t txr_dma; struct sk_buff *rx_skbs[RX_RING_ENTRIES]; struct sk_buff *tx_skbs[TX_RING_ENTRIES]; int rx_ci; /* RX consumer index */ int rx_pi; /* RX producer index */ int tx_ci; /* TX consumer index */ int tx_pi; /* TX producer index */ int txqlen; u32 emcr, ehar_h, ehar_l; spinlock_t ioc3_lock; struct mii_if_info mii; /* Members used by autonegotiation */ struct timer_list ioc3_timer; }; static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void ioc3_set_multicast_list(struct net_device *dev); static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); static void ioc3_timeout(struct net_device *dev, unsigned int txqueue); static inline unsigned int ioc3_hash(const unsigned char *addr); static void ioc3_start(struct ioc3_private *ip); static inline void ioc3_stop(struct ioc3_private *ip); static void ioc3_init(struct net_device *dev); static int ioc3_alloc_rx_bufs(struct net_device *dev); static void ioc3_free_rx_bufs(struct ioc3_private *ip); static inline void ioc3_clean_tx_ring(struct ioc3_private *ip); static const struct ethtool_ops ioc3_ethtool_ops; static inline unsigned long aligned_rx_skb_addr(unsigned long addr) { return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL); } static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb, struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma) { struct sk_buff *new_skb; dma_addr_t d; int offset; new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC); if (!new_skb) return -ENOMEM; /* ensure buffer is aligned to IOC3_DMA_XFER_LEN */ offset = aligned_rx_skb_addr((unsigned long)new_skb->data); if (offset) skb_reserve(new_skb, offset); d = dma_map_single(ip->dma_dev, new_skb->data, RX_BUF_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(ip->dma_dev, d)) { dev_kfree_skb_any(new_skb); return -ENOMEM; } *rxb_dma = d; *rxb = (struct ioc3_erxbuf *)new_skb->data; skb_reserve(new_skb, RX_OFFSET); *skb = new_skb; return 0; } #ifdef CONFIG_PCI_XTALK_BRIDGE static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) { return (addr & ~PCI64_ATTR_BAR) | attr; } #define ERBAR_VAL (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT) #else static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr) { return addr; } #define ERBAR_VAL 0 #endif static int ioc3eth_nvmem_match(struct device *dev, const void *data) { const char *name = dev_name(dev); const char *prefix = data; int prefix_len; prefix_len = strlen(prefix); if (strlen(name) < (prefix_len + 3)) return 0; if (memcmp(prefix, name, prefix_len) != 0) return 0; /* found nvmem device which is attached to our ioc3 * now check for one wire family code 09, 89 and 91 */ if (memcmp(name + prefix_len, "09-", 3) == 0) return 1; if (memcmp(name + prefix_len, "89-", 3) == 0) return 1; if (memcmp(name + prefix_len, "91-", 3) == 0) return 1; return 0; } static int ioc3eth_get_mac_addr(struct resource *res, u8 mac_addr[6]) { struct nvmem_device *nvmem; char prefix[24]; u8 prom[16]; int ret; int i; snprintf(prefix, sizeof(prefix), "ioc3-%012llx-", res->start & ~0xffff); nvmem = nvmem_device_find(prefix, ioc3eth_nvmem_match); if (IS_ERR(nvmem)) return PTR_ERR(nvmem); ret = nvmem_device_read(nvmem, 0, 16, prom); nvmem_device_put(nvmem); if (ret < 0) return ret; /* check, if content is valid */ if (prom[0] != 0x0a || crc16(CRC16_INIT, prom, 13) 
!= CRC16_VALID) return -EINVAL; for (i = 0; i < 6; i++) mac_addr[i] = prom[10 - i]; return 0; } static void __ioc3_set_mac_address(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); writel((dev->dev_addr[5] << 8) | dev->dev_addr[4], &ip->regs->emar_h); writel((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | dev->dev_addr[0], &ip->regs->emar_l); } static int ioc3_set_mac_address(struct net_device *dev, void *addr) { struct ioc3_private *ip = netdev_priv(dev); struct sockaddr *sa = addr; eth_hw_addr_set(dev, sa->sa_data); spin_lock_irq(&ip->ioc3_lock); __ioc3_set_mac_address(dev); spin_unlock_irq(&ip->ioc3_lock); return 0; } /* Caller must hold the ioc3_lock ever for MII readers. This is also * used to protect the transmitter side but it's low contention. */ static int ioc3_mdio_read(struct net_device *dev, int phy, int reg) { struct ioc3_private *ip = netdev_priv(dev); struct ioc3_ethregs *regs = ip->regs; while (readl(&regs->micr) & MICR_BUSY) ; writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG, &regs->micr); while (readl(&regs->micr) & MICR_BUSY) ; return readl(&regs->midr_r) & MIDR_DATA_MASK; } static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data) { struct ioc3_private *ip = netdev_priv(dev); struct ioc3_ethregs *regs = ip->regs; while (readl(&regs->micr) & MICR_BUSY) ; writel(data, &regs->midr_w); writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr); while (readl(&regs->micr) & MICR_BUSY) ; } static int ioc3_mii_init(struct ioc3_private *ip); static struct net_device_stats *ioc3_get_stats(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); struct ioc3_ethregs *regs = ip->regs; dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK; return &dev->stats; } static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len) { struct ethhdr *eh = eth_hdr(skb); unsigned int proto; unsigned char *cp; struct iphdr *ih; u32 csum, ehsum; u16 *ew; /* Did hardware handle the checksum at all? The cases we can handle * are: * * - TCP and UDP checksums of IPv4 only. * - IPv6 would be doable but we keep that for later ... * - Only unfragmented packets. Did somebody already tell you * fragmentation is evil? * - don't care about packet size. Worst case when processing a * malformed packet we'll try to access the packet at ip header + * 64 bytes which is still inside the skb. Even in the unlikely * case where the checksum is right the higher layers will still * drop the packet as appropriate. */ if (eh->h_proto != htons(ETH_P_IP)) return; ih = (struct iphdr *)((char *)eh + ETH_HLEN); if (ip_is_fragment(ih)) return; proto = ih->protocol; if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) return; /* Same as tx - compute csum of pseudo header */ csum = hwsum + (ih->tot_len - (ih->ihl << 2)) + htons((u16)ih->protocol) + (ih->saddr >> 16) + (ih->saddr & 0xffff) + (ih->daddr >> 16) + (ih->daddr & 0xffff); /* Sum up ethernet dest addr, src addr and protocol */ ew = (u16 *)eh; ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6]; ehsum = (ehsum & 0xffff) + (ehsum >> 16); ehsum = (ehsum & 0xffff) + (ehsum >> 16); csum += 0xffff ^ ehsum; /* In the next step we also subtract the 1's complement * checksum of the trailing ethernet CRC. 
*/ cp = (char *)eh + len; /* points at trailing CRC */ if (len & 1) { csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]); csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]); } else { csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]); csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]); } csum = (csum & 0xffff) + (csum >> 16); csum = (csum & 0xffff) + (csum >> 16); if (csum == 0xffff) skb->ip_summed = CHECKSUM_UNNECESSARY; } static inline void ioc3_rx(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); struct sk_buff *skb, *new_skb; int rx_entry, n_entry, len; struct ioc3_erxbuf *rxb; unsigned long *rxr; dma_addr_t d; u32 w0, err; rxr = ip->rxr; /* Ring base */ rx_entry = ip->rx_ci; /* RX consume index */ n_entry = ip->rx_pi; skb = ip->rx_skbs[rx_entry]; rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); w0 = be32_to_cpu(rxb->w0); while (w0 & ERXBUF_V) { err = be32_to_cpu(rxb->err); /* It's valid ... */ if (err & ERXBUF_GOODPKT) { len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4; skb_put(skb, len); skb->protocol = eth_type_trans(skb, dev); if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) { /* Ouch, drop packet and just recycle packet * to keep the ring filled. */ dev->stats.rx_dropped++; new_skb = skb; d = rxr[rx_entry]; goto next; } if (likely(dev->features & NETIF_F_RXCSUM)) ioc3_tcpudp_checksum(skb, w0 & ERXBUF_IPCKSUM_MASK, len); dma_unmap_single(ip->dma_dev, rxr[rx_entry], RX_BUF_SIZE, DMA_FROM_DEVICE); netif_rx(skb); ip->rx_skbs[rx_entry] = NULL; /* Poison */ dev->stats.rx_packets++; /* Statistics */ dev->stats.rx_bytes += len; } else { /* The frame is invalid and the skb never * reached the network layer so we can just * recycle it. */ new_skb = skb; d = rxr[rx_entry]; dev->stats.rx_errors++; } if (err & ERXBUF_CRCERR) /* Statistics */ dev->stats.rx_crc_errors++; if (err & ERXBUF_FRAMERR) dev->stats.rx_frame_errors++; next: ip->rx_skbs[n_entry] = new_skb; rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); rxb->w0 = 0; /* Clear valid flag */ n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */ /* Now go on to the next ring entry. */ rx_entry = (rx_entry + 1) & RX_RING_MASK; skb = ip->rx_skbs[rx_entry]; rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); w0 = be32_to_cpu(rxb->w0); } writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir); ip->rx_pi = n_entry; ip->rx_ci = rx_entry; } static inline void ioc3_tx(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); struct ioc3_ethregs *regs = ip->regs; unsigned long packets, bytes; int tx_entry, o_entry; struct sk_buff *skb; u32 etcir; spin_lock(&ip->ioc3_lock); etcir = readl(&regs->etcir); tx_entry = (etcir >> 7) & TX_RING_MASK; o_entry = ip->tx_ci; packets = 0; bytes = 0; while (o_entry != tx_entry) { packets++; skb = ip->tx_skbs[o_entry]; bytes += skb->len; dev_consume_skb_irq(skb); ip->tx_skbs[o_entry] = NULL; o_entry = (o_entry + 1) & TX_RING_MASK; /* Next */ etcir = readl(&regs->etcir); /* More pkts sent? */ tx_entry = (etcir >> 7) & TX_RING_MASK; } dev->stats.tx_packets += packets; dev->stats.tx_bytes += bytes; ip->txqlen -= packets; if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES) netif_wake_queue(dev); ip->tx_ci = o_entry; spin_unlock(&ip->ioc3_lock); } /* Deal with fatal IOC3 errors. This condition might be caused by a hard or * software problems, so we should try to recover * more gracefully if this ever happens. In theory we might be flooded * with such error interrupts if something really goes wrong, so we might * also consider to take the interface down. 
*/ static void ioc3_error(struct net_device *dev, u32 eisr) { struct ioc3_private *ip = netdev_priv(dev); spin_lock(&ip->ioc3_lock); if (eisr & EISR_RXOFLO) net_err_ratelimited("%s: RX overflow.\n", dev->name); if (eisr & EISR_RXBUFOFLO) net_err_ratelimited("%s: RX buffer overflow.\n", dev->name); if (eisr & EISR_RXMEMERR) net_err_ratelimited("%s: RX PCI error.\n", dev->name); if (eisr & EISR_RXPARERR) net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name); if (eisr & EISR_TXBUFUFLO) net_err_ratelimited("%s: TX buffer underflow.\n", dev->name); if (eisr & EISR_TXMEMERR) net_err_ratelimited("%s: TX PCI error.\n", dev->name); ioc3_stop(ip); ioc3_free_rx_bufs(ip); ioc3_clean_tx_ring(ip); ioc3_init(dev); if (ioc3_alloc_rx_bufs(dev)) { netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); spin_unlock(&ip->ioc3_lock); return; } ioc3_start(ip); ioc3_mii_init(ip); netif_wake_queue(dev); spin_unlock(&ip->ioc3_lock); } /* The interrupt handler does all of the Rx thread work and cleans up * after the Tx thread. */ static irqreturn_t ioc3_interrupt(int irq, void *dev_id) { struct ioc3_private *ip = netdev_priv(dev_id); struct ioc3_ethregs *regs = ip->regs; u32 eisr; eisr = readl(&regs->eisr); writel(eisr, &regs->eisr); readl(&regs->eisr); /* Flush */ if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) ioc3_error(dev_id, eisr); if (eisr & EISR_RXTIMERINT) ioc3_rx(dev_id); if (eisr & EISR_TXEXPLICIT) ioc3_tx(dev_id); return IRQ_HANDLED; } static inline void ioc3_setup_duplex(struct ioc3_private *ip) { struct ioc3_ethregs *regs = ip->regs; spin_lock_irq(&ip->ioc3_lock); if (ip->mii.full_duplex) { writel(ETCSR_FD, &regs->etcsr); ip->emcr |= EMCR_DUPLEX; } else { writel(ETCSR_HD, &regs->etcsr); ip->emcr &= ~EMCR_DUPLEX; } writel(ip->emcr, &regs->emcr); spin_unlock_irq(&ip->ioc3_lock); } static void ioc3_timer(struct timer_list *t) { struct ioc3_private *ip = from_timer(ip, t, ioc3_timer); /* Print the link status if it has changed */ mii_check_media(&ip->mii, 1, 0); ioc3_setup_duplex(ip); ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */ add_timer(&ip->ioc3_timer); } /* Try to find a PHY. There is no apparent relation between the MII addresses * in the SGI documentation and what we find in reality, so we simply probe * for the PHY. */ static int ioc3_mii_init(struct ioc3_private *ip) { u16 word; int i; for (i = 0; i < 32; i++) { word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1); if (word != 0xffff && word != 0x0000) { ip->mii.phy_id = i; return 0; } } ip->mii.phy_id = -1; return -ENODEV; } static void ioc3_mii_start(struct ioc3_private *ip) { ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. 
*/ add_timer(&ip->ioc3_timer); } static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry) { struct ioc3_etxd *desc; u32 cmd, bufcnt, len; desc = &ip->txr[entry]; cmd = be32_to_cpu(desc->cmd); bufcnt = be32_to_cpu(desc->bufcnt); if (cmd & ETXD_B1V) { len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT; dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1), len, DMA_TO_DEVICE); } if (cmd & ETXD_B2V) { len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT; dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2), len, DMA_TO_DEVICE); } } static inline void ioc3_clean_tx_ring(struct ioc3_private *ip) { struct sk_buff *skb; int i; for (i = 0; i < TX_RING_ENTRIES; i++) { skb = ip->tx_skbs[i]; if (skb) { ioc3_tx_unmap(ip, i); ip->tx_skbs[i] = NULL; dev_kfree_skb_any(skb); } ip->txr[i].cmd = 0; } ip->tx_pi = 0; ip->tx_ci = 0; } static void ioc3_free_rx_bufs(struct ioc3_private *ip) { int rx_entry, n_entry; struct sk_buff *skb; n_entry = ip->rx_ci; rx_entry = ip->rx_pi; while (n_entry != rx_entry) { skb = ip->rx_skbs[n_entry]; if (skb) { dma_unmap_single(ip->dma_dev, be64_to_cpu(ip->rxr[n_entry]), RX_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } n_entry = (n_entry + 1) & RX_RING_MASK; } } static int ioc3_alloc_rx_bufs(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); struct ioc3_erxbuf *rxb; dma_addr_t d; int i; /* Now the rx buffers. The RX ring may be larger but * we only allocate 16 buffers for now. Need to tune * this for performance and memory later. */ for (i = 0; i < RX_BUFFS; i++) { if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d)) return -ENOMEM; rxb->w0 = 0; /* Clear valid flag */ ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); } ip->rx_ci = 0; ip->rx_pi = RX_BUFFS; return 0; } static inline void ioc3_ssram_disc(struct ioc3_private *ip) { struct ioc3_ethregs *regs = ip->regs; u32 *ssram0 = &ip->ssram[0x0000]; u32 *ssram1 = &ip->ssram[0x4000]; u32 pattern = 0x5555; /* Assume the larger size SSRAM and enable parity checking */ writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr); readl(&regs->emcr); /* Flush */ writel(pattern, ssram0); writel(~pattern & IOC3_SSRAM_DM, ssram1); if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern || (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) { /* set ssram size to 64 KB */ ip->emcr |= EMCR_RAMPAR; writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr); } else { ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR; } } static void ioc3_init(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); struct ioc3_ethregs *regs = ip->regs; del_timer_sync(&ip->ioc3_timer); /* Kill if running */ writel(EMCR_RST, &regs->emcr); /* Reset */ readl(&regs->emcr); /* Flush WB */ udelay(4); /* Give it time ... */ writel(0, &regs->emcr); readl(&regs->emcr); /* Misc registers */ writel(ERBAR_VAL, &regs->erbar); readl(&regs->etcdc); /* Clear on read */ writel(15, &regs->ercsr); /* RX low watermark */ writel(0, &regs->ertr); /* Interrupt immediately */ __ioc3_set_mac_address(dev); writel(ip->ehar_h, &regs->ehar_h); writel(ip->ehar_l, &regs->ehar_l); writel(42, &regs->ersr); /* XXX should be random */ } static void ioc3_start(struct ioc3_private *ip) { struct ioc3_ethregs *regs = ip->regs; unsigned long ring; /* Now the rx ring base, consume & produce registers. 
*/ ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC); writel(ring >> 32, &regs->erbr_h); writel(ring & 0xffffffff, &regs->erbr_l); writel(ip->rx_ci << 3, &regs->ercir); writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir); ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC); ip->txqlen = 0; /* nothing queued */ /* Now the tx ring base, consume & produce registers. */ writel(ring >> 32, &regs->etbr_h); writel(ring & 0xffffffff, &regs->etbr_l); writel(ip->tx_pi << 7, &regs->etpir); writel(ip->tx_ci << 7, &regs->etcir); readl(&regs->etcir); /* Flush */ ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN | EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN; writel(ip->emcr, &regs->emcr); writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier); readl(&regs->eier); } static inline void ioc3_stop(struct ioc3_private *ip) { struct ioc3_ethregs *regs = ip->regs; writel(0, &regs->emcr); /* Shutup */ writel(0, &regs->eier); /* Disable interrupts */ readl(&regs->eier); /* Flush */ } static int ioc3_open(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); ip->ehar_h = 0; ip->ehar_l = 0; ioc3_init(dev); if (ioc3_alloc_rx_bufs(dev)) { netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); return -ENOMEM; } ioc3_start(ip); ioc3_mii_start(ip); netif_start_queue(dev); return 0; } static int ioc3_close(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); del_timer_sync(&ip->ioc3_timer); netif_stop_queue(dev); ioc3_stop(ip); ioc3_free_rx_bufs(ip); ioc3_clean_tx_ring(ip); return 0; } static const struct net_device_ops ioc3_netdev_ops = { .ndo_open = ioc3_open, .ndo_stop = ioc3_close, .ndo_start_xmit = ioc3_start_xmit, .ndo_tx_timeout = ioc3_timeout, .ndo_get_stats = ioc3_get_stats, .ndo_set_rx_mode = ioc3_set_multicast_list, .ndo_eth_ioctl = ioc3_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ioc3_set_mac_address, }; static int ioc3eth_probe(struct platform_device *pdev) { u32 sw_physid1, sw_physid2, vendor, model, rev; struct ioc3_private *ip; struct net_device *dev; struct resource *regs; u8 mac_addr[6]; int err; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_err(&pdev->dev, "Invalid resource\n"); return -EINVAL; } /* get mac addr from one wire prom */ if (ioc3eth_get_mac_addr(regs, mac_addr)) return -EPROBE_DEFER; /* not available yet */ dev = alloc_etherdev(sizeof(struct ioc3_private)); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); ip = netdev_priv(dev); ip->dma_dev = pdev->dev.parent; ip->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ip->regs)) { err = PTR_ERR(ip->regs); goto out_free; } ip->ssram = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(ip->ssram)) { err = PTR_ERR(ip->ssram); goto out_free; } dev->irq = platform_get_irq(pdev, 0); if (dev->irq < 0) { err = dev->irq; goto out_free; } if (devm_request_irq(&pdev->dev, dev->irq, ioc3_interrupt, IRQF_SHARED, "ioc3-eth", dev)) { dev_err(&pdev->dev, "Can't get irq %d\n", dev->irq); err = -ENODEV; goto out_free; } spin_lock_init(&ip->ioc3_lock); timer_setup(&ip->ioc3_timer, ioc3_timer, 0); ioc3_stop(ip); /* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */ ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma, GFP_KERNEL); if (!ip->rxr) { pr_err("ioc3-eth: rx ring allocation failed\n"); err = -ENOMEM; goto out_stop; } /* Allocate tx rings. 
16kb = 128 bufs, must be 16kb aligned */ ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, &ip->txr_dma, GFP_KERNEL); if (!ip->tx_ring) { pr_err("ioc3-eth: tx ring allocation failed\n"); err = -ENOMEM; goto out_stop; } /* Align TX ring */ ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K); ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K); ioc3_init(dev); ip->mii.phy_id_mask = 0x1f; ip->mii.reg_num_mask = 0x1f; ip->mii.dev = dev; ip->mii.mdio_read = ioc3_mdio_read; ip->mii.mdio_write = ioc3_mdio_write; ioc3_mii_init(ip); if (ip->mii.phy_id == -1) { netdev_err(dev, "Didn't find a PHY, goodbye.\n"); err = -ENODEV; goto out_stop; } ioc3_mii_start(ip); ioc3_ssram_disc(ip); eth_hw_addr_set(dev, mac_addr); /* The IOC3-specific entries in the device structure. */ dev->watchdog_timeo = 5 * HZ; dev->netdev_ops = &ioc3_netdev_ops; dev->ethtool_ops = &ioc3_ethtool_ops; dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA; sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2); err = register_netdev(dev); if (err) goto out_stop; mii_check_media(&ip->mii, 1, 1); ioc3_setup_duplex(ip); vendor = (sw_physid1 << 12) | (sw_physid2 >> 4); model = (sw_physid2 >> 4) & 0x3f; rev = sw_physid2 & 0xf; netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n", ip->mii.phy_id, vendor, model, rev); netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n", ip->emcr & EMCR_BUFSIZ ? 128 : 64); return 0; out_stop: del_timer_sync(&ip->ioc3_timer); if (ip->rxr) dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); if (ip->tx_ring) dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); out_free: free_netdev(dev); return err; } static int ioc3eth_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct ioc3_private *ip = netdev_priv(dev); dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); unregister_netdev(dev); del_timer_sync(&ip->ioc3_timer); free_netdev(dev); return 0; } static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); struct ioc3_etxd *desc; unsigned long data; unsigned int len; int produce; u32 w0 = 0; /* IOC3 has a fairly simple minded checksumming hardware which simply * adds up the 1's complement checksum for the entire packet and * inserts it at an offset which can be specified in the descriptor * into the transmit packet. This means we have to compensate for the * MAC header which should not be summed and the TCP/UDP pseudo headers * manually. */ if (skb->ip_summed == CHECKSUM_PARTIAL) { const struct iphdr *ih = ip_hdr(skb); const int proto = ntohs(ih->protocol); unsigned int csoff; u32 csum, ehsum; u16 *eh; /* The MAC header. skb->mac seem the logic approach * to find the MAC header - except it's a NULL pointer ... 
*/ eh = (u16 *)skb->data; /* Sum up dest addr, src addr and protocol */ ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6]; /* Skip IP header; it's sum is always zero and was * already filled in by ip_output.c */ csum = csum_tcpudp_nofold(ih->saddr, ih->daddr, ih->tot_len - (ih->ihl << 2), proto, csum_fold(ehsum)); csum = (csum & 0xffff) + (csum >> 16); /* Fold again */ csum = (csum & 0xffff) + (csum >> 16); csoff = ETH_HLEN + (ih->ihl << 2); if (proto == IPPROTO_UDP) { csoff += offsetof(struct udphdr, check); udp_hdr(skb)->check = csum; } if (proto == IPPROTO_TCP) { csoff += offsetof(struct tcphdr, check); tcp_hdr(skb)->check = csum; } w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); } spin_lock_irq(&ip->ioc3_lock); data = (unsigned long)skb->data; len = skb->len; produce = ip->tx_pi; desc = &ip->txr[produce]; if (len <= 104) { /* Short packet, let's copy it directly into the ring. */ skb_copy_from_linear_data(skb, desc->data, skb->len); if (len < ETH_ZLEN) { /* Very short packet, pad with zeros at the end. */ memset(desc->data + len, 0, ETH_ZLEN - len); len = ETH_ZLEN; } desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0); desc->bufcnt = cpu_to_be32(len); } else if ((data ^ (data + len - 1)) & 0x4000) { unsigned long b2 = (data | 0x3fffUL) + 1UL; unsigned long s1 = b2 - data; unsigned long s2 = data + len - b2; dma_addr_t d1, d2; desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | ETXD_B2V | w0); desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) | (s2 << ETXD_B2CNT_SHIFT)); d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE); if (dma_mapping_error(ip->dma_dev, d1)) goto drop_packet; d2 = dma_map_single(ip->dma_dev, (void *)b2, s1, DMA_TO_DEVICE); if (dma_mapping_error(ip->dma_dev, d2)) { dma_unmap_single(ip->dma_dev, d1, len, DMA_TO_DEVICE); goto drop_packet; } desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF)); desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF)); } else { dma_addr_t d; /* Normal sized packet that doesn't cross a page boundary. */ desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0); desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT); d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(ip->dma_dev, d)) goto drop_packet; desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF)); } mb(); /* make sure all descriptor changes are visible */ ip->tx_skbs[produce] = skb; /* Remember skb */ produce = (produce + 1) & TX_RING_MASK; ip->tx_pi = produce; writel(produce << 7, &ip->regs->etpir); /* Fire ... 
*/ ip->txqlen++; if (ip->txqlen >= (TX_RING_ENTRIES - 1)) netif_stop_queue(dev); spin_unlock_irq(&ip->ioc3_lock); return NETDEV_TX_OK; drop_packet: dev_kfree_skb_any(skb); dev->stats.tx_dropped++; spin_unlock_irq(&ip->ioc3_lock); return NETDEV_TX_OK; } static void ioc3_timeout(struct net_device *dev, unsigned int txqueue) { struct ioc3_private *ip = netdev_priv(dev); netdev_err(dev, "transmit timed out, resetting\n"); spin_lock_irq(&ip->ioc3_lock); ioc3_stop(ip); ioc3_free_rx_bufs(ip); ioc3_clean_tx_ring(ip); ioc3_init(dev); if (ioc3_alloc_rx_bufs(dev)) { netdev_err(dev, "%s: rx buffer allocation failed\n", __func__); spin_unlock_irq(&ip->ioc3_lock); return; } ioc3_start(ip); ioc3_mii_init(ip); ioc3_mii_start(ip); spin_unlock_irq(&ip->ioc3_lock); netif_wake_queue(dev); } /* Given a multicast ethernet address, this routine calculates the * address's bit index in the logical address filter mask */ static inline unsigned int ioc3_hash(const unsigned char *addr) { unsigned int temp = 0; int bits; u32 crc; crc = ether_crc_le(ETH_ALEN, addr); crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */ for (bits = 6; --bits >= 0; ) { temp <<= 1; temp |= (crc & 0x1); crc >>= 1; } return temp; } static void ioc3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, IOC3_NAME, sizeof(info->driver)); strscpy(info->version, IOC3_VERSION, sizeof(info->version)); strscpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)), sizeof(info->bus_info)); } static int ioc3_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct ioc3_private *ip = netdev_priv(dev); spin_lock_irq(&ip->ioc3_lock); mii_ethtool_get_link_ksettings(&ip->mii, cmd); spin_unlock_irq(&ip->ioc3_lock); return 0; } static int ioc3_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct ioc3_private *ip = netdev_priv(dev); int rc; spin_lock_irq(&ip->ioc3_lock); rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd); spin_unlock_irq(&ip->ioc3_lock); return rc; } static int ioc3_nway_reset(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); int rc; spin_lock_irq(&ip->ioc3_lock); rc = mii_nway_restart(&ip->mii); spin_unlock_irq(&ip->ioc3_lock); return rc; } static u32 ioc3_get_link(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); int rc; spin_lock_irq(&ip->ioc3_lock); rc = mii_link_ok(&ip->mii); spin_unlock_irq(&ip->ioc3_lock); return rc; } static const struct ethtool_ops ioc3_ethtool_ops = { .get_drvinfo = ioc3_get_drvinfo, .nway_reset = ioc3_nway_reset, .get_link = ioc3_get_link, .get_link_ksettings = ioc3_get_link_ksettings, .set_link_ksettings = ioc3_set_link_ksettings, }; static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct ioc3_private *ip = netdev_priv(dev); int rc; spin_lock_irq(&ip->ioc3_lock); rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL); spin_unlock_irq(&ip->ioc3_lock); return rc; } static void ioc3_set_multicast_list(struct net_device *dev) { struct ioc3_private *ip = netdev_priv(dev); struct ioc3_ethregs *regs = ip->regs; struct netdev_hw_addr *ha; u64 ehar = 0; spin_lock_irq(&ip->ioc3_lock); if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ ip->emcr |= EMCR_PROMISC; writel(ip->emcr, &regs->emcr); readl(&regs->emcr); } else { ip->emcr &= ~EMCR_PROMISC; writel(ip->emcr, &regs->emcr); /* Clear promiscuous. 
*/ readl(&regs->emcr); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { /* Too many for hashing to make sense or we want all * multicast packets anyway, so skip computing all the * hashes and just accept all packets. */ ip->ehar_h = 0xffffffff; ip->ehar_l = 0xffffffff; } else { netdev_for_each_mc_addr(ha, dev) { ehar |= (1UL << ioc3_hash(ha->addr)); } ip->ehar_h = ehar >> 32; ip->ehar_l = ehar & 0xffffffff; } writel(ip->ehar_h, &regs->ehar_h); writel(ip->ehar_l, &regs->ehar_l); } spin_unlock_irq(&ip->ioc3_lock); } static struct platform_driver ioc3eth_driver = { .probe = ioc3eth_probe, .remove = ioc3eth_remove, .driver = { .name = "ioc3-eth", } }; module_platform_driver(ioc3eth_driver); MODULE_AUTHOR("Ralf Baechle <[email protected]>"); MODULE_DESCRIPTION("SGI IOC3 Ethernet driver"); MODULE_LICENSE("GPL");
linux-master
drivers/net/ethernet/sgi/ioc3-eth.c
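/*
 * Illustrative sketch, not part of the driver above: ioc3_hash() takes the
 * Ethernet CRC of a multicast address, keeps the lowest six bits and
 * bit-reverses them to get a 0..63 index into the 64-bit logical address
 * filter, which ioc3_set_multicast_list() then splits across the
 * EHAR_H/EHAR_L registers.  The standalone model below reproduces just that
 * bit manipulation; hash_from_crc and sample_crcs are hypothetical names, and
 * the CRC is taken as an input instead of calling the kernel's ether_crc_le().
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int hash_from_crc(uint32_t crc)
{
	unsigned int temp = 0;
	int bits;

	crc &= 0x3f;				/* keep the lowest 6 bits */
	for (bits = 6; --bits >= 0; ) {		/* bit-reverse them */
		temp <<= 1;
		temp |= crc & 0x1;
		crc >>= 1;
	}
	return temp;				/* filter bit index, 0..63 */
}

int main(void)
{
	uint64_t ehar = 0;
	uint32_t sample_crcs[] = { 0x01, 0x20, 0x3f };	/* arbitrary example values */
	unsigned int i, idx;

	for (i = 0; i < 3; i++) {
		idx = hash_from_crc(sample_crcs[i]);
		ehar |= 1ULL << idx;
		printf("crc low6=0x%02x -> filter bit %u\n",
		       (unsigned int)(sample_crcs[i] & 0x3f), idx);
	}
	/* Split into the two 32-bit halves the hardware registers expect. */
	printf("ehar_h=0x%08x ehar_l=0x%08x\n",
	       (unsigned int)(ehar >> 32), (unsigned int)(ehar & 0xffffffff));
	return 0;
}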
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2016-2017, National Instruments Corp. * * Author: Moritz Fischer <[email protected]> */ #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/phy.h> #include <linux/mii.h> #include <linux/nvmem-consumer.h> #include <linux/ethtool.h> #include <linux/iopoll.h> #define TX_BD_NUM 64 #define RX_BD_NUM 128 /* Axi DMA Register definitions */ #define XAXIDMA_TX_CR_OFFSET 0x00 /* Channel control */ #define XAXIDMA_TX_SR_OFFSET 0x04 /* Status */ #define XAXIDMA_TX_CDESC_OFFSET 0x08 /* Current descriptor pointer */ #define XAXIDMA_TX_TDESC_OFFSET 0x10 /* Tail descriptor pointer */ #define XAXIDMA_RX_CR_OFFSET 0x30 /* Channel control */ #define XAXIDMA_RX_SR_OFFSET 0x34 /* Status */ #define XAXIDMA_RX_CDESC_OFFSET 0x38 /* Current descriptor pointer */ #define XAXIDMA_RX_TDESC_OFFSET 0x40 /* Tail descriptor pointer */ #define XAXIDMA_CR_RUNSTOP_MASK 0x1 /* Start/stop DMA channel */ #define XAXIDMA_CR_RESET_MASK 0x4 /* Reset DMA engine */ #define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */ #define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */ #define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */ #define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */ #define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */ #define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */ #define XAXIDMA_DELAY_SHIFT 24 #define XAXIDMA_COALESCE_SHIFT 16 #define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */ #define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */ #define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */ #define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */ /* Default TX/RX Threshold and waitbound values for SGDMA mode */ #define XAXIDMA_DFT_TX_THRESHOLD 24 #define XAXIDMA_DFT_TX_WAITBOUND 254 #define XAXIDMA_DFT_RX_THRESHOLD 24 #define XAXIDMA_DFT_RX_WAITBOUND 254 #define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */ #define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */ #define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */ #define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */ #define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */ #define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */ #define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */ #define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */ #define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */ #define NIXGE_REG_CTRL_OFFSET 0x4000 #define NIXGE_REG_INFO 0x00 #define NIXGE_REG_MAC_CTL 0x04 #define NIXGE_REG_PHY_CTL 0x08 #define NIXGE_REG_LED_CTL 0x0c #define NIXGE_REG_MDIO_DATA 0x10 #define NIXGE_REG_MDIO_ADDR 0x14 #define NIXGE_REG_MDIO_OP 0x18 #define NIXGE_REG_MDIO_CTRL 0x1c #define NIXGE_ID_LED_CTL_EN BIT(0) #define NIXGE_ID_LED_CTL_VAL BIT(1) #define NIXGE_MDIO_CLAUSE45 BIT(12) #define NIXGE_MDIO_CLAUSE22 0 #define NIXGE_MDIO_OP(n) (((n) & 0x3) << 10) #define NIXGE_MDIO_OP_ADDRESS 0 #define NIXGE_MDIO_C45_WRITE BIT(0) #define NIXGE_MDIO_C45_READ (BIT(1) | BIT(0)) #define NIXGE_MDIO_C22_WRITE BIT(0) #define NIXGE_MDIO_C22_READ BIT(1) #define NIXGE_MDIO_ADDR(n) (((n) & 0x1f) << 5) #define NIXGE_MDIO_MMD(n) (((n) & 0x1f) << 0) #define NIXGE_REG_MAC_LSB 0x1000 #define NIXGE_REG_MAC_MSB 0x1004 /* Packet size info */ #define NIXGE_HDR_SIZE 14 /* 
Size of Ethernet header */ #define NIXGE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */ #define NIXGE_MTU 1500 /* Max MTU of an Ethernet frame */ #define NIXGE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */ #define NIXGE_MAX_FRAME_SIZE (NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) #define NIXGE_MAX_JUMBO_FRAME_SIZE \ (NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) enum nixge_version { NIXGE_V2, NIXGE_V3, NIXGE_VERSION_COUNT }; struct nixge_hw_dma_bd { u32 next_lo; u32 next_hi; u32 phys_lo; u32 phys_hi; u32 reserved3; u32 reserved4; u32 cntrl; u32 status; u32 app0; u32 app1; u32 app2; u32 app3; u32 app4; u32 sw_id_offset_lo; u32 sw_id_offset_hi; u32 reserved6; }; #ifdef CONFIG_PHYS_ADDR_T_64BIT #define nixge_hw_dma_bd_set_addr(bd, field, addr) \ do { \ (bd)->field##_lo = lower_32_bits((addr)); \ (bd)->field##_hi = upper_32_bits((addr)); \ } while (0) #else #define nixge_hw_dma_bd_set_addr(bd, field, addr) \ ((bd)->field##_lo = lower_32_bits((addr))) #endif #define nixge_hw_dma_bd_set_phys(bd, addr) \ nixge_hw_dma_bd_set_addr((bd), phys, (addr)) #define nixge_hw_dma_bd_set_next(bd, addr) \ nixge_hw_dma_bd_set_addr((bd), next, (addr)) #define nixge_hw_dma_bd_set_offset(bd, addr) \ nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr)) #ifdef CONFIG_PHYS_ADDR_T_64BIT #define nixge_hw_dma_bd_get_addr(bd, field) \ (dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo)) #else #define nixge_hw_dma_bd_get_addr(bd, field) \ (dma_addr_t)((bd)->field##_lo) #endif struct nixge_tx_skb { struct sk_buff *skb; dma_addr_t mapping; size_t size; bool mapped_as_page; }; struct nixge_priv { struct net_device *ndev; struct napi_struct napi; struct device *dev; /* Connection to PHY device */ struct device_node *phy_node; phy_interface_t phy_mode; int link; unsigned int speed; unsigned int duplex; /* MDIO bus data */ struct mii_bus *mii_bus; /* MII bus reference */ /* IO registers, dma functions and IRQs */ void __iomem *ctrl_regs; void __iomem *dma_regs; struct tasklet_struct dma_err_tasklet; int tx_irq; int rx_irq; /* Buffer descriptors */ struct nixge_hw_dma_bd *tx_bd_v; struct nixge_tx_skb *tx_skb; dma_addr_t tx_bd_p; struct nixge_hw_dma_bd *rx_bd_v; dma_addr_t rx_bd_p; u32 tx_bd_ci; u32 tx_bd_tail; u32 rx_bd_ci; u32 coalesce_count_rx; u32 coalesce_count_tx; }; static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val) { writel(val, priv->dma_regs + offset); } static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset, dma_addr_t addr) { writel(lower_32_bits(addr), priv->dma_regs + offset); #ifdef CONFIG_PHYS_ADDR_T_64BIT writel(upper_32_bits(addr), priv->dma_regs + offset + 4); #endif } static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset) { return readl(priv->dma_regs + offset); } static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val) { writel(val, priv->ctrl_regs + offset); } static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset) { return readl(priv->ctrl_regs + offset); } #define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \ readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \ (sleep_us), (timeout_us)) #define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \ readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \ (sleep_us), (timeout_us)) static void nixge_hw_dma_bd_release(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); dma_addr_t phys_addr; struct sk_buff *skb; int i; if (priv->rx_bd_v) { for (i = 0; i 
< RX_BD_NUM; i++) { phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], phys); dma_unmap_single(ndev->dev.parent, phys_addr, NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); skb = (struct sk_buff *)(uintptr_t) nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], sw_id_offset); dev_kfree_skb(skb); } dma_free_coherent(ndev->dev.parent, sizeof(*priv->rx_bd_v) * RX_BD_NUM, priv->rx_bd_v, priv->rx_bd_p); } if (priv->tx_skb) devm_kfree(ndev->dev.parent, priv->tx_skb); if (priv->tx_bd_v) dma_free_coherent(ndev->dev.parent, sizeof(*priv->tx_bd_v) * TX_BD_NUM, priv->tx_bd_v, priv->tx_bd_p); } static int nixge_hw_dma_bd_init(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); struct sk_buff *skb; dma_addr_t phys; u32 cr; int i; /* Reset the indexes which are used for accessing the BDs */ priv->tx_bd_ci = 0; priv->tx_bd_tail = 0; priv->rx_bd_ci = 0; /* Allocate the Tx and Rx buffer descriptors. */ priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*priv->tx_bd_v) * TX_BD_NUM, &priv->tx_bd_p, GFP_KERNEL); if (!priv->tx_bd_v) goto out; priv->tx_skb = devm_kcalloc(ndev->dev.parent, TX_BD_NUM, sizeof(*priv->tx_skb), GFP_KERNEL); if (!priv->tx_skb) goto out; priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, sizeof(*priv->rx_bd_v) * RX_BD_NUM, &priv->rx_bd_p, GFP_KERNEL); if (!priv->rx_bd_v) goto out; for (i = 0; i < TX_BD_NUM; i++) { nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i], priv->tx_bd_p + sizeof(*priv->tx_bd_v) * ((i + 1) % TX_BD_NUM)); } for (i = 0; i < RX_BD_NUM; i++) { nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i], priv->rx_bd_p + sizeof(*priv->rx_bd_v) * ((i + 1) % RX_BD_NUM)); skb = __netdev_alloc_skb_ip_align(ndev, NIXGE_MAX_JUMBO_FRAME_SIZE, GFP_KERNEL); if (!skb) goto out; nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb); phys = dma_map_single(ndev->dev.parent, skb->data, NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys); priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; } /* Start updating the Rx channel control register */ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); /* Update the interrupt coalesce count */ cr = ((cr & ~XAXIDMA_COALESCE_MASK) | ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT)); /* Update the delay timer count */ cr = ((cr & ~XAXIDMA_DELAY_MASK) | (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); /* Enable coalesce, delay timer and error interrupts */ cr |= XAXIDMA_IRQ_ALL_MASK; /* Write to the Rx channel control register */ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); /* Start updating the Tx channel control register */ cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); /* Update the interrupt coalesce count */ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT)); /* Update the delay timer count */ cr = (((cr & ~XAXIDMA_DELAY_MASK)) | (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); /* Enable coalesce, delay timer and error interrupts */ cr |= XAXIDMA_IRQ_ALL_MASK; /* Write to the Tx channel control register */ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr); /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. 
*/ nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p); cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p + (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. But only after we write to the * tail pointer register that the Tx channel will start transmitting. */ nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); return 0; out: nixge_hw_dma_bd_release(ndev); return -ENOMEM; } static void __nixge_device_reset(struct nixge_priv *priv, off_t offset) { u32 status; int err; /* Reset Axi DMA. This would reset NIXGE Ethernet core as well. * The reset process of Axi DMA takes a while to complete as all * pending commands/transfers will be flushed or completed during * this reset process. */ nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK); err = nixge_dma_poll_timeout(priv, offset, status, !(status & XAXIDMA_CR_RESET_MASK), 10, 1000); if (err) netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__); } static void nixge_device_reset(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); __nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET); __nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET); if (nixge_hw_dma_bd_init(ndev)) netdev_err(ndev, "%s: descriptor allocation failed\n", __func__); netif_trans_update(ndev); } static void nixge_handle_link_change(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; if (phydev->link != priv->link || phydev->speed != priv->speed || phydev->duplex != priv->duplex) { priv->link = phydev->link; priv->speed = phydev->speed; priv->duplex = phydev->duplex; phy_print_status(phydev); } } static void nixge_tx_skb_unmap(struct nixge_priv *priv, struct nixge_tx_skb *tx_skb) { if (tx_skb->mapping) { if (tx_skb->mapped_as_page) dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping, tx_skb->size, DMA_TO_DEVICE); else dma_unmap_single(priv->ndev->dev.parent, tx_skb->mapping, tx_skb->size, DMA_TO_DEVICE); tx_skb->mapping = 0; } if (tx_skb->skb) { dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; } } static void nixge_start_xmit_done(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); struct nixge_hw_dma_bd *cur_p; struct nixge_tx_skb *tx_skb; unsigned int status = 0; u32 packets = 0; u32 size = 0; cur_p = &priv->tx_bd_v[priv->tx_bd_ci]; tx_skb = &priv->tx_skb[priv->tx_bd_ci]; status = cur_p->status; while (status & XAXIDMA_BD_STS_COMPLETE_MASK) { nixge_tx_skb_unmap(priv, tx_skb); cur_p->status = 0; size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; packets++; ++priv->tx_bd_ci; priv->tx_bd_ci %= TX_BD_NUM; cur_p = &priv->tx_bd_v[priv->tx_bd_ci]; tx_skb = &priv->tx_skb[priv->tx_bd_ci]; status = cur_p->status; } ndev->stats.tx_packets += packets; ndev->stats.tx_bytes += size; if (packets) netif_wake_queue(ndev); } static int nixge_check_tx_bd_space(struct nixge_priv *priv, int num_frag) { struct nixge_hw_dma_bd *cur_p; cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM]; if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) return NETDEV_TX_BUSY; return 0; } static netdev_tx_t nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct nixge_priv 
*priv = netdev_priv(ndev); struct nixge_hw_dma_bd *cur_p; struct nixge_tx_skb *tx_skb; dma_addr_t tail_p, cur_phys; skb_frag_t *frag; u32 num_frag; u32 ii; num_frag = skb_shinfo(skb)->nr_frags; cur_p = &priv->tx_bd_v[priv->tx_bd_tail]; tx_skb = &priv->tx_skb[priv->tx_bd_tail]; if (nixge_check_tx_bd_space(priv, num_frag)) { if (!netif_queue_stopped(ndev)) netif_stop_queue(ndev); return NETDEV_TX_OK; } cur_phys = dma_map_single(ndev->dev.parent, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (dma_mapping_error(ndev->dev.parent, cur_phys)) goto drop; nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; tx_skb->skb = NULL; tx_skb->mapping = cur_phys; tx_skb->size = skb_headlen(skb); tx_skb->mapped_as_page = false; for (ii = 0; ii < num_frag; ii++) { ++priv->tx_bd_tail; priv->tx_bd_tail %= TX_BD_NUM; cur_p = &priv->tx_bd_v[priv->tx_bd_tail]; tx_skb = &priv->tx_skb[priv->tx_bd_tail]; frag = &skb_shinfo(skb)->frags[ii]; cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(ndev->dev.parent, cur_phys)) goto frag_err; nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = skb_frag_size(frag); tx_skb->skb = NULL; tx_skb->mapping = cur_phys; tx_skb->size = skb_frag_size(frag); tx_skb->mapped_as_page = true; } /* last buffer of the frame */ tx_skb->skb = skb; cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail; /* Start the transfer */ nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p); ++priv->tx_bd_tail; priv->tx_bd_tail %= TX_BD_NUM; return NETDEV_TX_OK; frag_err: for (; ii > 0; ii--) { if (priv->tx_bd_tail) priv->tx_bd_tail--; else priv->tx_bd_tail = TX_BD_NUM - 1; tx_skb = &priv->tx_skb[priv->tx_bd_tail]; nixge_tx_skb_unmap(priv, tx_skb); cur_p = &priv->tx_bd_v[priv->tx_bd_tail]; cur_p->status = 0; } dma_unmap_single(priv->ndev->dev.parent, tx_skb->mapping, tx_skb->size, DMA_TO_DEVICE); drop: ndev->stats.tx_dropped++; return NETDEV_TX_OK; } static int nixge_recv(struct net_device *ndev, int budget) { struct nixge_priv *priv = netdev_priv(ndev); struct sk_buff *skb, *new_skb; struct nixge_hw_dma_bd *cur_p; dma_addr_t tail_p = 0, cur_phys = 0; u32 packets = 0; u32 length = 0; u32 size = 0; cur_p = &priv->rx_bd_v[priv->rx_bd_ci]; while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK && budget > packets)) { tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) * priv->rx_bd_ci; skb = (struct sk_buff *)(uintptr_t) nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset); length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; if (length > NIXGE_MAX_JUMBO_FRAME_SIZE) length = NIXGE_MAX_JUMBO_FRAME_SIZE; dma_unmap_single(ndev->dev.parent, nixge_hw_dma_bd_get_addr(cur_p, phys), NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); skb_put(skb, length); skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); /* For now mark them as CHECKSUM_NONE since * we don't have offload capabilities */ skb->ip_summed = CHECKSUM_NONE; napi_gro_receive(&priv->napi, skb); size += length; packets++; new_skb = netdev_alloc_skb_ip_align(ndev, NIXGE_MAX_JUMBO_FRAME_SIZE); if (!new_skb) return packets; cur_phys = dma_map_single(ndev->dev.parent, new_skb->data, NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(ndev->dev.parent, cur_phys)) { /* FIXME: bail out and clean up */ netdev_err(ndev, "Failed to map ...\n"); } nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; cur_p->status = 0; 
nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb); ++priv->rx_bd_ci; priv->rx_bd_ci %= RX_BD_NUM; cur_p = &priv->rx_bd_v[priv->rx_bd_ci]; } ndev->stats.rx_packets += packets; ndev->stats.rx_bytes += size; if (tail_p) nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p); return packets; } static int nixge_poll(struct napi_struct *napi, int budget) { struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi); int work_done; u32 status, cr; work_done = 0; work_done = nixge_recv(priv->ndev, budget); if (work_done < budget) { napi_complete_done(napi, work_done); status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET); if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { /* If there's more, reschedule, but clear */ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status); napi_reschedule(napi); } else { /* if not, turn on RX IRQs again ... */ cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); } } return work_done; } static irqreturn_t nixge_tx_irq(int irq, void *_ndev) { struct nixge_priv *priv = netdev_priv(_ndev); struct net_device *ndev = _ndev; unsigned int status; dma_addr_t phys; u32 cr; status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET); if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status); nixge_start_xmit_done(priv->ndev); goto out; } if (!(status & XAXIDMA_IRQ_ALL_MASK)) { netdev_err(ndev, "No interrupts asserted in Tx path\n"); return IRQ_NONE; } if (status & XAXIDMA_IRQ_ERROR_MASK) { phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci], phys); netdev_err(ndev, "DMA Tx error 0x%x\n", status); netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ cr &= (~XAXIDMA_IRQ_ALL_MASK); /* Write to the Tx channel control register */ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr); cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ cr &= (~XAXIDMA_IRQ_ALL_MASK); /* Write to the Rx channel control register */ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); tasklet_schedule(&priv->dma_err_tasklet); nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status); } out: return IRQ_HANDLED; } static irqreturn_t nixge_rx_irq(int irq, void *_ndev) { struct nixge_priv *priv = netdev_priv(_ndev); struct net_device *ndev = _ndev; unsigned int status; dma_addr_t phys; u32 cr; status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET); if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { /* Turn of IRQs because NAPI */ nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status); cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); if (napi_schedule_prep(&priv->napi)) __napi_schedule(&priv->napi); goto out; } if (!(status & XAXIDMA_IRQ_ALL_MASK)) { netdev_err(ndev, "No interrupts asserted in Rx path\n"); return IRQ_NONE; } if (status & XAXIDMA_IRQ_ERROR_MASK) { phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci], phys); netdev_err(ndev, "DMA Rx error 0x%x\n", status); netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ cr &= (~XAXIDMA_IRQ_ALL_MASK); /* Finally write to the Tx 
channel control register */ nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr); cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ cr &= (~XAXIDMA_IRQ_ALL_MASK); /* write to the Rx channel control register */ nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr); tasklet_schedule(&priv->dma_err_tasklet); nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status); } out: return IRQ_HANDLED; } static void nixge_dma_err_handler(struct tasklet_struct *t) { struct nixge_priv *lp = from_tasklet(lp, t, dma_err_tasklet); struct nixge_hw_dma_bd *cur_p; struct nixge_tx_skb *tx_skb; u32 cr, i; __nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET); __nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET); for (i = 0; i < TX_BD_NUM; i++) { cur_p = &lp->tx_bd_v[i]; tx_skb = &lp->tx_skb[i]; nixge_tx_skb_unmap(lp, tx_skb); nixge_hw_dma_bd_set_phys(cur_p, 0); cur_p->cntrl = 0; cur_p->status = 0; nixge_hw_dma_bd_set_offset(cur_p, 0); } for (i = 0; i < RX_BD_NUM; i++) { cur_p = &lp->rx_bd_v[i]; cur_p->status = 0; } lp->tx_bd_ci = 0; lp->tx_bd_tail = 0; lp->rx_bd_ci = 0; /* Start updating the Rx channel control register */ cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET); /* Update the interrupt coalesce count */ cr = ((cr & ~XAXIDMA_COALESCE_MASK) | (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); /* Update the delay timer count */ cr = ((cr & ~XAXIDMA_DELAY_MASK) | (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); /* Enable coalesce, delay timer and error interrupts */ cr |= XAXIDMA_IRQ_ALL_MASK; /* Finally write to the Rx channel control register */ nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr); /* Start updating the Tx channel control register */ cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET); /* Update the interrupt coalesce count */ cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); /* Update the delay timer count */ cr = (((cr & ~XAXIDMA_DELAY_MASK)) | (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); /* Enable coalesce, delay timer and error interrupts */ cr |= XAXIDMA_IRQ_ALL_MASK; /* Finally write to the Tx channel control register */ nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr); /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. */ nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET); nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. 
But only after we write to the * tail pointer register that the Tx channel will start transmitting */ nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET); nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); } static int nixge_open(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); struct phy_device *phy; int ret; nixge_device_reset(ndev); phy = of_phy_connect(ndev, priv->phy_node, &nixge_handle_link_change, 0, priv->phy_mode); if (!phy) return -ENODEV; phy_start(phy); /* Enable tasklets for Axi DMA error handling */ tasklet_setup(&priv->dma_err_tasklet, nixge_dma_err_handler); napi_enable(&priv->napi); /* Enable interrupts for Axi DMA Tx */ ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev); if (ret) goto err_tx_irq; /* Enable interrupts for Axi DMA Rx */ ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev); if (ret) goto err_rx_irq; netif_start_queue(ndev); return 0; err_rx_irq: free_irq(priv->tx_irq, ndev); err_tx_irq: napi_disable(&priv->napi); phy_stop(phy); phy_disconnect(phy); tasklet_kill(&priv->dma_err_tasklet); netdev_err(ndev, "request_irq() failed\n"); return ret; } static int nixge_stop(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); u32 cr; netif_stop_queue(ndev); napi_disable(&priv->napi); if (ndev->phydev) { phy_stop(ndev->phydev); phy_disconnect(ndev->phydev); } cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr & (~XAXIDMA_CR_RUNSTOP_MASK)); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr & (~XAXIDMA_CR_RUNSTOP_MASK)); tasklet_kill(&priv->dma_err_tasklet); free_irq(priv->tx_irq, ndev); free_irq(priv->rx_irq, ndev); nixge_hw_dma_bd_release(ndev); return 0; } static int nixge_change_mtu(struct net_device *ndev, int new_mtu) { if (netif_running(ndev)) return -EBUSY; if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) > NIXGE_MAX_JUMBO_FRAME_SIZE) return -EINVAL; ndev->mtu = new_mtu; return 0; } static s32 __nixge_hw_set_mac_address(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB, (ndev->dev_addr[2]) << 24 | (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0)); nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB, (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8))); return 0; } static int nixge_net_set_mac_address(struct net_device *ndev, void *p) { int err; err = eth_mac_addr(ndev, p); if (!err) __nixge_hw_set_mac_address(ndev); return err; } static const struct net_device_ops nixge_netdev_ops = { .ndo_open = nixge_open, .ndo_stop = nixge_stop, .ndo_start_xmit = nixge_start_xmit, .ndo_change_mtu = nixge_change_mtu, .ndo_set_mac_address = nixge_net_set_mac_address, .ndo_validate_addr = eth_validate_addr, }; static void nixge_ethtools_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) { strscpy(ed->driver, "nixge", sizeof(ed->driver)); strscpy(ed->bus_info, "platform", sizeof(ed->bus_info)); } static int nixge_ethtools_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ecoalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct nixge_priv *priv = netdev_priv(ndev); u32 regval = 0; regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) >> XAXIDMA_COALESCE_SHIFT; regval = nixge_dma_read_reg(priv, 
XAXIDMA_TX_CR_OFFSET); ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) >> XAXIDMA_COALESCE_SHIFT; return 0; } static int nixge_ethtools_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ecoalesce, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct nixge_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { netdev_err(ndev, "Please stop netif before applying configuration\n"); return -EBUSY; } if (ecoalesce->rx_max_coalesced_frames) priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; if (ecoalesce->tx_max_coalesced_frames) priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; return 0; } static int nixge_ethtools_set_phys_id(struct net_device *ndev, enum ethtool_phys_id_state state) { struct nixge_priv *priv = netdev_priv(ndev); u32 ctrl; ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL); switch (state) { case ETHTOOL_ID_ACTIVE: ctrl |= NIXGE_ID_LED_CTL_EN; /* Enable identification LED override*/ nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); return 2; case ETHTOOL_ID_ON: ctrl |= NIXGE_ID_LED_CTL_VAL; nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); break; case ETHTOOL_ID_OFF: ctrl &= ~NIXGE_ID_LED_CTL_VAL; nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); break; case ETHTOOL_ID_INACTIVE: /* Restore LED settings */ ctrl &= ~NIXGE_ID_LED_CTL_EN; nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl); break; } return 0; } static const struct ethtool_ops nixge_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = nixge_ethtools_get_drvinfo, .get_coalesce = nixge_ethtools_get_coalesce, .set_coalesce = nixge_ethtools_set_coalesce, .set_phys_id = nixge_ethtools_set_phys_id, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_link = ethtool_op_get_link, }; static int nixge_mdio_read_c22(struct mii_bus *bus, int phy_id, int reg) { struct nixge_priv *priv = bus->priv; u32 status, tmp; int err; u16 device; device = reg & 0x1f; tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, !status, 10, 1000); if (err) { dev_err(priv->dev, "timeout setting read command"); return err; } status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA); return status; } static int nixge_mdio_read_c45(struct mii_bus *bus, int phy_id, int device, int reg) { struct nixge_priv *priv = bus->priv; u32 status, tmp; int err; nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, !status, 10, 1000); if (err) { dev_err(priv->dev, "timeout setting address"); return err; } tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, !status, 10, 1000); if (err) { dev_err(priv->dev, "timeout setting read command"); return err; } status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA); return status; } 
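/* nixge_mdio_write_c22 - issue a Clause 22 MDIO write: program the data and
 * opcode registers, start the transaction via NIXGE_REG_MDIO_CTRL and poll
 * until the core reports completion.
 */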
static int nixge_mdio_write_c22(struct mii_bus *bus, int phy_id, int reg, u16 val) { struct nixge_priv *priv = bus->priv; u32 status, tmp; u16 device; int err; device = reg & 0x1f; tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, !status, 10, 1000); if (err) dev_err(priv->dev, "timeout setting write command"); return err; } static int nixge_mdio_write_c45(struct mii_bus *bus, int phy_id, int device, int reg, u16 val) { struct nixge_priv *priv = bus->priv; u32 status, tmp; int err; nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff); tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1); err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, !status, 10, 1000); if (err) { dev_err(priv->dev, "timeout setting address"); return err; } tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val); nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp); err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status, !status, 10, 1000); if (err) dev_err(priv->dev, "timeout setting write command"); return err; } static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np) { struct mii_bus *bus; bus = devm_mdiobus_alloc(priv->dev); if (!bus) return -ENOMEM; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev)); bus->priv = priv; bus->name = "nixge_mii_bus"; bus->read = nixge_mdio_read_c22; bus->write = nixge_mdio_write_c22; bus->read_c45 = nixge_mdio_read_c45; bus->write_c45 = nixge_mdio_write_c45; bus->parent = priv->dev; priv->mii_bus = bus; return of_mdiobus_register(bus, np); } static void *nixge_get_nvmem_address(struct device *dev) { struct nvmem_cell *cell; size_t cell_size; char *mac; cell = nvmem_cell_get(dev, "address"); if (IS_ERR(cell)) return cell; mac = nvmem_cell_read(cell, &cell_size); nvmem_cell_put(cell); return mac; } /* Match table for of_platform binding */ static const struct of_device_id nixge_dt_ids[] = { { .compatible = "ni,xge-enet-2.00", .data = (void *)NIXGE_V2 }, { .compatible = "ni,xge-enet-3.00", .data = (void *)NIXGE_V3 }, {}, }; MODULE_DEVICE_TABLE(of, nixge_dt_ids); static int nixge_of_get_resources(struct platform_device *pdev) { const struct of_device_id *of_id; enum nixge_version version; struct net_device *ndev; struct nixge_priv *priv; ndev = platform_get_drvdata(pdev); priv = netdev_priv(ndev); of_id = of_match_node(nixge_dt_ids, pdev->dev.of_node); if (!of_id) return -ENODEV; version = (enum nixge_version)of_id->data; if (version <= NIXGE_V2) priv->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); else priv->dma_regs = devm_platform_ioremap_resource_byname(pdev, "dma"); if (IS_ERR(priv->dma_regs)) { netdev_err(ndev, "failed to map dma regs\n"); return PTR_ERR(priv->dma_regs); } if (version <= NIXGE_V2) priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET; else priv->ctrl_regs = devm_platform_ioremap_resource_byname(pdev, "ctrl"); if (IS_ERR(priv->ctrl_regs)) { netdev_err(ndev, "failed to map ctrl regs\n"); return PTR_ERR(priv->ctrl_regs); } 
	return 0;
}

static int nixge_probe(struct platform_device *pdev)
{
	struct device_node *mn, *phy_node;
	struct nixge_priv *priv;
	struct net_device *ndev;
	const u8 *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	/* Prefer a MAC address from nvmem, fall back to a random one */
	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) {
		eth_hw_addr_set(ndev, mac_addr);
		kfree(mac_addr);
	} else {
		eth_hw_addr_random(ndev);
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll);
	err = nixge_of_get_resources(pdev);
	if (err)
		goto free_netdev;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq\n");
		err = priv->tx_irq;
		goto free_netdev;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq\n");
		err = priv->rx_irq;
		goto free_netdev;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	mn = of_get_child_by_name(pdev->dev.of_node, "mdio");
	if (mn) {
		err = nixge_mdio_setup(priv, mn);
		of_node_put(mn);
		if (err) {
			netdev_err(ndev, "error registering mdio bus\n");
			goto free_netdev;
		}
	}

	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
	if (err) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		goto unregister_mdio;
	}

	phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(pdev->dev.of_node)) {
		/* Support fixed-link setups that have no phy-handle */
		err = of_phy_register_fixed_link(pdev->dev.of_node);
		if (err < 0) {
			netdev_err(ndev, "broken fixed-link specification\n");
			goto unregister_mdio;
		}
		phy_node = of_node_get(pdev->dev.of_node);
	}
	priv->phy_node = phy_node;

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto free_phy;
	}

	return 0;

free_phy:
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
	of_node_put(phy_node);

unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}

static int nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
	of_node_put(priv->phy_node);

	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);

	return 0;
}

static struct platform_driver nixge_driver = {
	.probe		= nixge_probe,
	.remove		= nixge_remove,
	.driver		= {
		.name		= "nixge",
		.of_match_table	= nixge_dt_ids,
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <[email protected]>");
linux-master
drivers/net/ethernet/ni/nixge.c
// SPDX-License-Identifier: GPL-2.0 /* Atheros AR71xx built-in ethernet mac driver * * Copyright (C) 2019 Oleksij Rempel <[email protected]> * * List of authors contributed to this driver before mainlining: * Alexander Couzens <[email protected]> * Christian Lamparter <[email protected]> * Chuanhong Guo <[email protected]> * Daniel F. Dickinson <[email protected]> * David Bauer <[email protected]> * Felix Fietkau <[email protected]> * Gabor Juhos <[email protected]> * Hauke Mehrtens <[email protected]> * Johann Neuhauser <[email protected]> * John Crispin <[email protected]> * Jo-Philipp Wich <[email protected]> * Koen Vandeputte <[email protected]> * Lucian Cristian <[email protected]> * Matt Merhar <[email protected]> * Milan Krstic <[email protected]> * Petr Štetiar <[email protected]> * Rosen Penev <[email protected]> * Stephen Walker <[email protected]> * Vittorio Gambaletta <[email protected]> * Weijie Gao <[email protected]> * Imre Kaloz <[email protected]> */ #include <linux/if_vlan.h> #include <linux/mfd/syscon.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/platform_device.h> #include <linux/phylink.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/clk.h> #include <linux/io.h> #include <net/selftests.h> /* For our NAPI weight bigger does *NOT* mean better - it means more * D-cache misses and lots more wasted cycles than we'll ever * possibly gain from saving instructions. */ #define AG71XX_NAPI_WEIGHT 32 #define AG71XX_OOM_REFILL (1 + HZ / 10) #define AG71XX_INT_ERR (AG71XX_INT_RX_BE | AG71XX_INT_TX_BE) #define AG71XX_INT_TX (AG71XX_INT_TX_PS) #define AG71XX_INT_RX (AG71XX_INT_RX_PR | AG71XX_INT_RX_OF) #define AG71XX_INT_POLL (AG71XX_INT_RX | AG71XX_INT_TX) #define AG71XX_INT_INIT (AG71XX_INT_ERR | AG71XX_INT_POLL) #define AG71XX_TX_MTU_LEN 1540 #define AG71XX_TX_RING_SPLIT 512 #define AG71XX_TX_RING_DS_PER_PKT DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \ AG71XX_TX_RING_SPLIT) #define AG71XX_TX_RING_SIZE_DEFAULT 128 #define AG71XX_RX_RING_SIZE_DEFAULT 256 #define AG71XX_MDIO_RETRY 1000 #define AG71XX_MDIO_DELAY 5 #define AG71XX_MDIO_MAX_CLK 5000000 /* Register offsets */ #define AG71XX_REG_MAC_CFG1 0x0000 #define MAC_CFG1_TXE BIT(0) /* Tx Enable */ #define MAC_CFG1_STX BIT(1) /* Synchronize Tx Enable */ #define MAC_CFG1_RXE BIT(2) /* Rx Enable */ #define MAC_CFG1_SRX BIT(3) /* Synchronize Rx Enable */ #define MAC_CFG1_TFC BIT(4) /* Tx Flow Control Enable */ #define MAC_CFG1_RFC BIT(5) /* Rx Flow Control Enable */ #define MAC_CFG1_SR BIT(31) /* Soft Reset */ #define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \ MAC_CFG1_SRX | MAC_CFG1_STX) #define AG71XX_REG_MAC_CFG2 0x0004 #define MAC_CFG2_FDX BIT(0) #define MAC_CFG2_PAD_CRC_EN BIT(2) #define MAC_CFG2_LEN_CHECK BIT(4) #define MAC_CFG2_IF_1000 BIT(9) #define MAC_CFG2_IF_10_100 BIT(8) #define AG71XX_REG_MAC_MFL 0x0010 #define AG71XX_REG_MII_CFG 0x0020 #define MII_CFG_CLK_DIV_4 0 #define MII_CFG_CLK_DIV_6 2 #define MII_CFG_CLK_DIV_8 3 #define MII_CFG_CLK_DIV_10 4 #define MII_CFG_CLK_DIV_14 5 #define MII_CFG_CLK_DIV_20 6 #define MII_CFG_CLK_DIV_28 7 #define MII_CFG_CLK_DIV_34 8 #define MII_CFG_CLK_DIV_42 9 #define MII_CFG_CLK_DIV_50 10 #define MII_CFG_CLK_DIV_58 11 #define MII_CFG_CLK_DIV_66 12 #define MII_CFG_CLK_DIV_74 13 #define MII_CFG_CLK_DIV_82 14 #define MII_CFG_CLK_DIV_98 15 #define MII_CFG_RESET BIT(31) #define AG71XX_REG_MII_CMD 0x0024 #define MII_CMD_READ BIT(0) #define AG71XX_REG_MII_ADDR 0x0028 #define MII_ADDR_SHIFT 8 #define AG71XX_REG_MII_CTRL 0x002c #define 
AG71XX_REG_MII_STATUS 0x0030 #define AG71XX_REG_MII_IND 0x0034 #define MII_IND_BUSY BIT(0) #define MII_IND_INVALID BIT(2) #define AG71XX_REG_MAC_IFCTL 0x0038 #define MAC_IFCTL_SPEED BIT(16) #define AG71XX_REG_MAC_ADDR1 0x0040 #define AG71XX_REG_MAC_ADDR2 0x0044 #define AG71XX_REG_FIFO_CFG0 0x0048 #define FIFO_CFG0_WTM BIT(0) /* Watermark Module */ #define FIFO_CFG0_RXS BIT(1) /* Rx System Module */ #define FIFO_CFG0_RXF BIT(2) /* Rx Fabric Module */ #define FIFO_CFG0_TXS BIT(3) /* Tx System Module */ #define FIFO_CFG0_TXF BIT(4) /* Tx Fabric Module */ #define FIFO_CFG0_ALL (FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \ | FIFO_CFG0_TXS | FIFO_CFG0_TXF) #define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT) #define FIFO_CFG0_ENABLE_SHIFT 8 #define AG71XX_REG_FIFO_CFG1 0x004c #define AG71XX_REG_FIFO_CFG2 0x0050 #define AG71XX_REG_FIFO_CFG3 0x0054 #define AG71XX_REG_FIFO_CFG4 0x0058 #define FIFO_CFG4_DE BIT(0) /* Drop Event */ #define FIFO_CFG4_DV BIT(1) /* RX_DV Event */ #define FIFO_CFG4_FC BIT(2) /* False Carrier */ #define FIFO_CFG4_CE BIT(3) /* Code Error */ #define FIFO_CFG4_CR BIT(4) /* CRC error */ #define FIFO_CFG4_LM BIT(5) /* Length Mismatch */ #define FIFO_CFG4_LO BIT(6) /* Length out of range */ #define FIFO_CFG4_OK BIT(7) /* Packet is OK */ #define FIFO_CFG4_MC BIT(8) /* Multicast Packet */ #define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */ #define FIFO_CFG4_DR BIT(10) /* Dribble */ #define FIFO_CFG4_LE BIT(11) /* Long Event */ #define FIFO_CFG4_CF BIT(12) /* Control Frame */ #define FIFO_CFG4_PF BIT(13) /* Pause Frame */ #define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */ #define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */ #define FIFO_CFG4_FT BIT(16) /* Frame Truncated */ #define FIFO_CFG4_UC BIT(17) /* Unicast Packet */ #define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \ FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \ FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \ FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \ FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \ FIFO_CFG4_VT) #define AG71XX_REG_FIFO_CFG5 0x005c #define FIFO_CFG5_DE BIT(0) /* Drop Event */ #define FIFO_CFG5_DV BIT(1) /* RX_DV Event */ #define FIFO_CFG5_FC BIT(2) /* False Carrier */ #define FIFO_CFG5_CE BIT(3) /* Code Error */ #define FIFO_CFG5_LM BIT(4) /* Length Mismatch */ #define FIFO_CFG5_LO BIT(5) /* Length Out of Range */ #define FIFO_CFG5_OK BIT(6) /* Packet is OK */ #define FIFO_CFG5_MC BIT(7) /* Multicast Packet */ #define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */ #define FIFO_CFG5_DR BIT(9) /* Dribble */ #define FIFO_CFG5_CF BIT(10) /* Control Frame */ #define FIFO_CFG5_PF BIT(11) /* Pause Frame */ #define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */ #define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */ #define FIFO_CFG5_LE BIT(14) /* Long Event */ #define FIFO_CFG5_FT BIT(15) /* Frame Truncated */ #define FIFO_CFG5_16 BIT(16) /* unknown */ #define FIFO_CFG5_17 BIT(17) /* unknown */ #define FIFO_CFG5_SF BIT(18) /* Short Frame */ #define FIFO_CFG5_BM BIT(19) /* Byte Mode */ #define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \ FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \ FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \ FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \ FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \ FIFO_CFG5_17 | FIFO_CFG5_SF) #define AG71XX_REG_TX_CTRL 0x0180 #define TX_CTRL_TXE BIT(0) /* Tx Enable */ #define AG71XX_REG_TX_DESC 0x0184 #define AG71XX_REG_TX_STATUS 0x0188 #define TX_STATUS_PS BIT(0) /* Packet Sent */ #define TX_STATUS_UR BIT(1) 
/* Tx Underrun */ #define TX_STATUS_BE BIT(3) /* Bus Error */ #define AG71XX_REG_RX_CTRL 0x018c #define RX_CTRL_RXE BIT(0) /* Rx Enable */ #define AG71XX_DMA_RETRY 10 #define AG71XX_DMA_DELAY 1 #define AG71XX_REG_RX_DESC 0x0190 #define AG71XX_REG_RX_STATUS 0x0194 #define RX_STATUS_PR BIT(0) /* Packet Received */ #define RX_STATUS_OF BIT(2) /* Rx Overflow */ #define RX_STATUS_BE BIT(3) /* Bus Error */ #define AG71XX_REG_INT_ENABLE 0x0198 #define AG71XX_REG_INT_STATUS 0x019c #define AG71XX_INT_TX_PS BIT(0) #define AG71XX_INT_TX_UR BIT(1) #define AG71XX_INT_TX_BE BIT(3) #define AG71XX_INT_RX_PR BIT(4) #define AG71XX_INT_RX_OF BIT(6) #define AG71XX_INT_RX_BE BIT(7) #define AG71XX_REG_FIFO_DEPTH 0x01a8 #define AG71XX_REG_RX_SM 0x01b0 #define AG71XX_REG_TX_SM 0x01b4 #define AG71XX_DEFAULT_MSG_ENABLE \ (NETIF_MSG_DRV \ | NETIF_MSG_PROBE \ | NETIF_MSG_LINK \ | NETIF_MSG_TIMER \ | NETIF_MSG_IFDOWN \ | NETIF_MSG_IFUP \ | NETIF_MSG_RX_ERR \ | NETIF_MSG_TX_ERR) struct ag71xx_statistic { unsigned short offset; u32 mask; const char name[ETH_GSTRING_LEN]; }; static const struct ag71xx_statistic ag71xx_statistics[] = { { 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", }, { 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", }, { 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", }, { 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", }, { 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", }, { 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", }, { 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", }, { 0x009C, GENMASK(23, 0), "Rx Byte", }, { 0x00A0, GENMASK(17, 0), "Rx Packet", }, { 0x00A4, GENMASK(11, 0), "Rx FCS Error", }, { 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", }, { 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", }, { 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", }, { 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", }, { 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", }, { 0x00BC, GENMASK(11, 0), "Rx Alignment Error", }, { 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", }, { 0x00C4, GENMASK(11, 0), "Rx Code Error", }, { 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", }, { 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", }, { 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", }, { 0x00D4, GENMASK(11, 0), "Rx Fragments", }, { 0x00D8, GENMASK(11, 0), "Rx Jabber", }, { 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", }, { 0x00E0, GENMASK(23, 0), "Tx Byte", }, { 0x00E4, GENMASK(17, 0), "Tx Packet", }, { 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", }, { 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", }, { 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", }, { 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", }, { 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", }, { 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", }, { 0x0100, GENMASK(11, 0), "Tx Multiple Collision", }, { 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", }, { 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", }, { 0x010C, GENMASK(12, 0), "Tx Total Collision", }, { 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", }, { 0x0114, GENMASK(11, 0), "Tx Drop Frame", }, { 0x0118, GENMASK(11, 0), "Tx Jabber Frame", }, { 0x011C, GENMASK(11, 0), "Tx FCS Error", }, { 0x0120, GENMASK(11, 0), "Tx Control Frame", }, { 0x0124, GENMASK(11, 0), "Tx Oversize Frame", }, { 0x0128, GENMASK(11, 0), "Tx Undersize Frame", }, { 0x012C, GENMASK(11, 0), "Tx Fragment", }, }; #define DESC_EMPTY BIT(31) #define DESC_MORE BIT(24) #define DESC_PKTLEN_M 0xfff struct ag71xx_desc { u32 data; u32 ctrl; u32 next; u32 pad; } __aligned(4); #define AG71XX_DESC_SIZE 
roundup(sizeof(struct ag71xx_desc), \ L1_CACHE_BYTES) struct ag71xx_buf { union { struct { struct sk_buff *skb; unsigned int len; } tx; struct { dma_addr_t dma_addr; void *rx_buf; } rx; }; }; struct ag71xx_ring { /* "Hot" fields in the data path. */ unsigned int curr; unsigned int dirty; /* "Cold" fields - not used in the data path. */ struct ag71xx_buf *buf; u16 order; u16 desc_split; dma_addr_t descs_dma; u8 *descs_cpu; }; enum ag71xx_type { AR7100, AR7240, AR9130, AR9330, AR9340, QCA9530, QCA9550, }; struct ag71xx_dcfg { u32 max_frame_len; const u32 *fifodata; u16 desc_pktlen_mask; bool tx_hang_workaround; enum ag71xx_type type; }; struct ag71xx { /* Critical data related to the per-packet data path are clustered * early in this structure to help improve the D-cache footprint. */ struct ag71xx_ring rx_ring ____cacheline_aligned; struct ag71xx_ring tx_ring ____cacheline_aligned; u16 rx_buf_size; u8 rx_buf_offset; struct net_device *ndev; struct platform_device *pdev; struct napi_struct napi; u32 msg_enable; const struct ag71xx_dcfg *dcfg; /* From this point onwards we're not looking at per-packet fields. */ void __iomem *mac_base; struct ag71xx_desc *stop_desc; dma_addr_t stop_desc_dma; phy_interface_t phy_if_mode; struct phylink *phylink; struct phylink_config phylink_config; struct delayed_work restart_work; struct timer_list oom_timer; struct reset_control *mac_reset; u32 fifodata[3]; int mac_idx; struct reset_control *mdio_reset; struct mii_bus *mii_bus; struct clk *clk_mdio; struct clk *clk_eth; }; static int ag71xx_desc_empty(struct ag71xx_desc *desc) { return (desc->ctrl & DESC_EMPTY) != 0; } static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx) { return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE]; } static int ag71xx_ring_size_order(int size) { return fls(size - 1); } static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type) { return ag->dcfg->type == type; } static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value) { iowrite32(value, ag->mac_base + reg); /* flush write */ (void)ioread32(ag->mac_base + reg); } static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg) { return ioread32(ag->mac_base + reg); } static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask) { void __iomem *r; r = ag->mac_base + reg; iowrite32(ioread32(r) | mask, r); /* flush write */ (void)ioread32(r); } static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask) { void __iomem *r; r = ag->mac_base + reg; iowrite32(ioread32(r) & ~mask, r); /* flush write */ (void)ioread32(r); } static void ag71xx_int_enable(struct ag71xx *ag, u32 ints) { ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints); } static void ag71xx_int_disable(struct ag71xx *ag, u32 ints) { ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints); } static void ag71xx_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct ag71xx *ag = netdev_priv(ndev); strscpy(info->driver, "ag71xx", sizeof(info->driver)); strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node), sizeof(info->bus_info)); } static int ag71xx_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *kset) { struct ag71xx *ag = netdev_priv(ndev); return phylink_ethtool_ksettings_get(ag->phylink, kset); } static int ag71xx_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *kset) { struct ag71xx *ag = netdev_priv(ndev); return phylink_ethtool_ksettings_set(ag->phylink, kset); } static int ag71xx_ethtool_nway_reset(struct net_device *ndev) { struct 
ag71xx *ag = netdev_priv(ndev); return phylink_ethtool_nway_reset(ag->phylink); } static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct ag71xx *ag = netdev_priv(ndev); phylink_ethtool_get_pauseparam(ag->phylink, pause); } static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct ag71xx *ag = netdev_priv(ndev); return phylink_ethtool_set_pauseparam(ag->phylink, pause); } static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset, u8 *data) { int i; switch (sset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++) memcpy(data + i * ETH_GSTRING_LEN, ag71xx_statistics[i].name, ETH_GSTRING_LEN); break; case ETH_SS_TEST: net_selftest_get_strings(data); break; } } static void ag71xx_ethtool_get_stats(struct net_device *ndev, struct ethtool_stats *stats, u64 *data) { struct ag71xx *ag = netdev_priv(ndev); int i; for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++) *data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset) & ag71xx_statistics[i].mask; } static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(ag71xx_statistics); case ETH_SS_TEST: return net_selftest_get_count(); default: return -EOPNOTSUPP; } } static const struct ethtool_ops ag71xx_ethtool_ops = { .get_drvinfo = ag71xx_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, .get_link_ksettings = ag71xx_get_link_ksettings, .set_link_ksettings = ag71xx_set_link_ksettings, .nway_reset = ag71xx_ethtool_nway_reset, .get_pauseparam = ag71xx_ethtool_get_pauseparam, .set_pauseparam = ag71xx_ethtool_set_pauseparam, .get_strings = ag71xx_ethtool_get_strings, .get_ethtool_stats = ag71xx_ethtool_get_stats, .get_sset_count = ag71xx_ethtool_get_sset_count, .self_test = net_selftest, }; static int ag71xx_mdio_wait_busy(struct ag71xx *ag) { struct net_device *ndev = ag->ndev; int i; for (i = 0; i < AG71XX_MDIO_RETRY; i++) { u32 busy; udelay(AG71XX_MDIO_DELAY); busy = ag71xx_rr(ag, AG71XX_REG_MII_IND); if (!busy) return 0; udelay(AG71XX_MDIO_DELAY); } netif_err(ag, link, ndev, "MDIO operation timed out\n"); return -ETIMEDOUT; } static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg) { struct ag71xx *ag = bus->priv; int err, val; err = ag71xx_mdio_wait_busy(ag); if (err) return err; ag71xx_wr(ag, AG71XX_REG_MII_ADDR, ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff)); /* enable read mode */ ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ); err = ag71xx_mdio_wait_busy(ag); if (err) return err; val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS); /* disable read mode */ ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0); netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n", addr, reg, val); return val; } static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg, u16 val) { struct ag71xx *ag = bus->priv; netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n", addr, reg, val); ag71xx_wr(ag, AG71XX_REG_MII_ADDR, ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff)); ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val); return ag71xx_mdio_wait_busy(ag); } static const u32 ar71xx_mdio_div_table[] = { 4, 4, 6, 8, 10, 14, 20, 28, }; static const u32 ar7240_mdio_div_table[] = { 2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96, }; static const u32 ar933x_mdio_div_table[] = { 4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98, }; static int ag71xx_mdio_get_divider(struct 
ag71xx *ag, u32 *div) { unsigned long ref_clock; const u32 *table; int ndivs, i; ref_clock = clk_get_rate(ag->clk_mdio); if (!ref_clock) return -EINVAL; if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) { table = ar933x_mdio_div_table; ndivs = ARRAY_SIZE(ar933x_mdio_div_table); } else if (ag71xx_is(ag, AR7240)) { table = ar7240_mdio_div_table; ndivs = ARRAY_SIZE(ar7240_mdio_div_table); } else { table = ar71xx_mdio_div_table; ndivs = ARRAY_SIZE(ar71xx_mdio_div_table); } for (i = 0; i < ndivs; i++) { unsigned long t; t = ref_clock / table[i]; if (t <= AG71XX_MDIO_MAX_CLK) { *div = i; return 0; } } return -ENOENT; } static int ag71xx_mdio_reset(struct mii_bus *bus) { struct ag71xx *ag = bus->priv; int err; u32 t; err = ag71xx_mdio_get_divider(ag, &t); if (err) return err; ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET); usleep_range(100, 200); ag71xx_wr(ag, AG71XX_REG_MII_CFG, t); usleep_range(100, 200); return 0; } static int ag71xx_mdio_probe(struct ag71xx *ag) { struct device *dev = &ag->pdev->dev; struct net_device *ndev = ag->ndev; static struct mii_bus *mii_bus; struct device_node *np, *mnp; int err; np = dev->of_node; ag->mii_bus = NULL; ag->clk_mdio = devm_clk_get(dev, "mdio"); if (IS_ERR(ag->clk_mdio)) { netif_err(ag, probe, ndev, "Failed to get mdio clk.\n"); return PTR_ERR(ag->clk_mdio); } err = clk_prepare_enable(ag->clk_mdio); if (err) { netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n"); return err; } mii_bus = devm_mdiobus_alloc(dev); if (!mii_bus) { err = -ENOMEM; goto mdio_err_put_clk; } ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); if (IS_ERR(ag->mdio_reset)) { netif_err(ag, probe, ndev, "Failed to get reset mdio.\n"); err = PTR_ERR(ag->mdio_reset); goto mdio_err_put_clk; } mii_bus->name = "ag71xx_mdio"; mii_bus->read = ag71xx_mdio_mii_read; mii_bus->write = ag71xx_mdio_mii_write; mii_bus->reset = ag71xx_mdio_reset; mii_bus->priv = ag; mii_bus->parent = dev; snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx); if (!IS_ERR(ag->mdio_reset)) { reset_control_assert(ag->mdio_reset); msleep(100); reset_control_deassert(ag->mdio_reset); msleep(200); } mnp = of_get_child_by_name(np, "mdio"); err = of_mdiobus_register(mii_bus, mnp); of_node_put(mnp); if (err) goto mdio_err_put_clk; ag->mii_bus = mii_bus; return 0; mdio_err_put_clk: clk_disable_unprepare(ag->clk_mdio); return err; } static void ag71xx_mdio_remove(struct ag71xx *ag) { if (ag->mii_bus) mdiobus_unregister(ag->mii_bus); clk_disable_unprepare(ag->clk_mdio); } static void ag71xx_hw_stop(struct ag71xx *ag) { /* disable all interrupts and stop the rx/tx engine */ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0); ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0); ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0); } static bool ag71xx_check_dma_stuck(struct ag71xx *ag) { unsigned long timestamp; u32 rx_sm, tx_sm, rx_fd; timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start); if (likely(time_before(jiffies, timestamp + HZ / 10))) return false; if (!netif_carrier_ok(ag->ndev)) return false; rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM); if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6) return true; tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM); rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH); if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) && ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0) return true; return false; } static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget) { struct ag71xx_ring *ring = &ag->tx_ring; int sent = 0, bytes_compl = 0, n = 0; struct net_device *ndev = ag->ndev; int 
ring_mask, ring_size; bool dma_stuck = false; ring_mask = BIT(ring->order) - 1; ring_size = BIT(ring->order); netif_dbg(ag, tx_queued, ndev, "processing TX ring\n"); while (ring->dirty + n != ring->curr) { struct ag71xx_desc *desc; struct sk_buff *skb; unsigned int i; i = (ring->dirty + n) & ring_mask; desc = ag71xx_ring_desc(ring, i); skb = ring->buf[i].tx.skb; if (!flush && !ag71xx_desc_empty(desc)) { if (ag->dcfg->tx_hang_workaround && ag71xx_check_dma_stuck(ag)) { schedule_delayed_work(&ag->restart_work, HZ / 2); dma_stuck = true; } break; } if (flush) desc->ctrl |= DESC_EMPTY; n++; if (!skb) continue; napi_consume_skb(skb, budget); ring->buf[i].tx.skb = NULL; bytes_compl += ring->buf[i].tx.len; sent++; ring->dirty += n; while (n > 0) { ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS); n--; } } netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent); if (!sent) return 0; ag->ndev->stats.tx_bytes += bytes_compl; ag->ndev->stats.tx_packets += sent; netdev_completed_queue(ag->ndev, sent, bytes_compl); if ((ring->curr - ring->dirty) < (ring_size * 3) / 4) netif_wake_queue(ag->ndev); if (!dma_stuck) cancel_delayed_work(&ag->restart_work); return sent; } static void ag71xx_dma_wait_stop(struct ag71xx *ag) { struct net_device *ndev = ag->ndev; int i; for (i = 0; i < AG71XX_DMA_RETRY; i++) { u32 rx, tx; mdelay(AG71XX_DMA_DELAY); rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE; tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE; if (!rx && !tx) return; } netif_err(ag, hw, ndev, "DMA stop operation timed out\n"); } static void ag71xx_dma_reset(struct ag71xx *ag) { struct net_device *ndev = ag->ndev; u32 val; int i; /* stop RX and TX */ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0); ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0); /* give the hardware some time to really stop all rx/tx activity * clearing the descriptors too early causes random memory corruption */ ag71xx_dma_wait_stop(ag); /* clear descriptor addresses */ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma); ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma); /* clear pending RX/TX interrupts */ for (i = 0; i < 256; i++) { ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR); ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS); } /* clear pending errors */ ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF); ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR); val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS); if (val) netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n", val); val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS); /* mask out reserved bits */ val &= ~0xff000000; if (val) netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n", val); } static void ag71xx_hw_setup(struct ag71xx *ag) { u32 init = MAC_CFG1_INIT; /* setup MAC configuration registers */ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init); ag71xx_sb(ag, AG71XX_REG_MAC_CFG2, MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK); /* setup max frame length to zero */ ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0); /* setup FIFO configuration registers */ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT); ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]); ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]); ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT); ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT); } static unsigned int ag71xx_max_frame_len(unsigned int mtu) { return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN; } static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac) { u32 t; t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16) | 
(((u32)mac[3]) << 8) | ((u32)mac[2]); ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t); t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16); ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t); } static void ag71xx_fast_reset(struct ag71xx *ag) { struct net_device *dev = ag->ndev; u32 rx_ds; u32 mii_reg; ag71xx_hw_stop(ag); mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG); rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC); ag71xx_tx_packets(ag, true, 0); reset_control_assert(ag->mac_reset); usleep_range(10, 20); reset_control_deassert(ag->mac_reset); usleep_range(10, 20); ag71xx_dma_reset(ag); ag71xx_hw_setup(ag); ag->tx_ring.curr = 0; ag->tx_ring.dirty = 0; netdev_reset_queue(ag->ndev); /* setup max frame length */ ag71xx_wr(ag, AG71XX_REG_MAC_MFL, ag71xx_max_frame_len(ag->ndev->mtu)); ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds); ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg); ag71xx_hw_set_macaddr(ag, dev->dev_addr); } static void ag71xx_hw_start(struct ag71xx *ag) { /* start RX engine */ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE); /* enable interrupts */ ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT); netif_wake_queue(ag->ndev); } static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); if (phylink_autoneg_inband(mode)) return; if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130)) ag71xx_fast_reset(ag); if (ag->tx_ring.desc_split) { ag->fifodata[2] &= 0xffff; ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16; } ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]); } static void ag71xx_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); ag71xx_hw_stop(ag); } static void ag71xx_mac_link_up(struct phylink_config *config, struct phy_device *phy, unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { struct ag71xx *ag = netdev_priv(to_net_dev(config->dev)); u32 cfg1, cfg2; u32 ifctl; u32 fifo5; cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2); cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX); cfg2 |= duplex ? 
MAC_CFG2_FDX : 0; ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL); ifctl &= ~(MAC_IFCTL_SPEED); fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5); fifo5 &= ~FIFO_CFG5_BM; switch (speed) { case SPEED_1000: cfg2 |= MAC_CFG2_IF_1000; fifo5 |= FIFO_CFG5_BM; break; case SPEED_100: cfg2 |= MAC_CFG2_IF_10_100; ifctl |= MAC_IFCTL_SPEED; break; case SPEED_10: cfg2 |= MAC_CFG2_IF_10_100; break; default: return; } ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2); ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5); ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl); cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1); cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC); if (tx_pause) cfg1 |= MAC_CFG1_TFC; if (rx_pause) cfg1 |= MAC_CFG1_RFC; ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1); ag71xx_hw_start(ag); } static const struct phylink_mac_ops ag71xx_phylink_mac_ops = { .mac_config = ag71xx_mac_config, .mac_link_down = ag71xx_mac_link_down, .mac_link_up = ag71xx_mac_link_up, }; static int ag71xx_phylink_setup(struct ag71xx *ag) { struct phylink *phylink; ag->phylink_config.dev = &ag->ndev->dev; ag->phylink_config.type = PHYLINK_NETDEV; ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) || ag71xx_is(ag, AR9340) || ag71xx_is(ag, QCA9530) || (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) __set_bit(PHY_INTERFACE_MODE_MII, ag->phylink_config.supported_interfaces); if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) || (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) || (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1)) __set_bit(PHY_INTERFACE_MODE_GMII, ag->phylink_config.supported_interfaces); if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0) __set_bit(PHY_INTERFACE_MODE_SGMII, ag->phylink_config.supported_interfaces); if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0) __set_bit(PHY_INTERFACE_MODE_RMII, ag->phylink_config.supported_interfaces); if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) || (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1)) __set_bit(PHY_INTERFACE_MODE_RGMII, ag->phylink_config.supported_interfaces); phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode, ag->phy_if_mode, &ag71xx_phylink_mac_ops); if (IS_ERR(phylink)) return PTR_ERR(phylink); ag->phylink = phylink; return 0; } static void ag71xx_ring_tx_clean(struct ag71xx *ag) { struct ag71xx_ring *ring = &ag->tx_ring; int ring_mask = BIT(ring->order) - 1; u32 bytes_compl = 0, pkts_compl = 0; struct net_device *ndev = ag->ndev; while (ring->curr != ring->dirty) { struct ag71xx_desc *desc; u32 i = ring->dirty & ring_mask; desc = ag71xx_ring_desc(ring, i); if (!ag71xx_desc_empty(desc)) { desc->ctrl = 0; ndev->stats.tx_errors++; } if (ring->buf[i].tx.skb) { bytes_compl += ring->buf[i].tx.len; pkts_compl++; dev_kfree_skb_any(ring->buf[i].tx.skb); } ring->buf[i].tx.skb = NULL; ring->dirty++; } /* flush descriptors */ wmb(); netdev_completed_queue(ndev, pkts_compl, bytes_compl); } static void ag71xx_ring_tx_init(struct ag71xx *ag) { struct ag71xx_ring *ring = &ag->tx_ring; int ring_size = BIT(ring->order); int ring_mask = ring_size - 1; int i; for (i = 0; i < ring_size; i++) { struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); desc->next = (u32)(ring->descs_dma + AG71XX_DESC_SIZE * ((i + 1) & ring_mask)); desc->ctrl = DESC_EMPTY; ring->buf[i].tx.skb = NULL; } /* flush descriptors */ wmb(); ring->curr = 0; ring->dirty = 0; netdev_reset_queue(ag->ndev); } static void ag71xx_ring_rx_clean(struct ag71xx *ag) { struct ag71xx_ring *ring = &ag->rx_ring; int ring_size = BIT(ring->order); int i; if (!ring->buf) return; 
for (i = 0; i < ring_size; i++) if (ring->buf[i].rx.rx_buf) { dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr, ag->rx_buf_size, DMA_FROM_DEVICE); skb_free_frag(ring->buf[i].rx.rx_buf); } } static int ag71xx_buffer_size(struct ag71xx *ag) { return ag->rx_buf_size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf, int offset, void *(*alloc)(unsigned int size)) { struct ag71xx_ring *ring = &ag->rx_ring; struct ag71xx_desc *desc; void *data; desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]); data = alloc(ag71xx_buffer_size(ag)); if (!data) return false; buf->rx.rx_buf = data; buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size, DMA_FROM_DEVICE); desc->data = (u32)buf->rx.dma_addr + offset; return true; } static int ag71xx_ring_rx_init(struct ag71xx *ag) { struct ag71xx_ring *ring = &ag->rx_ring; struct net_device *ndev = ag->ndev; int ring_mask = BIT(ring->order) - 1; int ring_size = BIT(ring->order); unsigned int i; int ret; ret = 0; for (i = 0; i < ring_size; i++) { struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); desc->next = (u32)(ring->descs_dma + AG71XX_DESC_SIZE * ((i + 1) & ring_mask)); netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n", desc, desc->next); } for (i = 0; i < ring_size; i++) { struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset, netdev_alloc_frag)) { ret = -ENOMEM; break; } desc->ctrl = DESC_EMPTY; } /* flush descriptors */ wmb(); ring->curr = 0; ring->dirty = 0; return ret; } static int ag71xx_ring_rx_refill(struct ag71xx *ag) { struct ag71xx_ring *ring = &ag->rx_ring; int ring_mask = BIT(ring->order) - 1; int offset = ag->rx_buf_offset; unsigned int count; count = 0; for (; ring->curr - ring->dirty > 0; ring->dirty++) { struct ag71xx_desc *desc; unsigned int i; i = ring->dirty & ring_mask; desc = ag71xx_ring_desc(ring, i); if (!ring->buf[i].rx.rx_buf && !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset, napi_alloc_frag)) break; desc->ctrl = DESC_EMPTY; count++; } /* flush descriptors */ wmb(); netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n", count); return count; } static int ag71xx_rings_init(struct ag71xx *ag) { struct ag71xx_ring *tx = &ag->tx_ring; struct ag71xx_ring *rx = &ag->rx_ring; int ring_size, tx_size; ring_size = BIT(tx->order) + BIT(rx->order); tx_size = BIT(tx->order); tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL); if (!tx->buf) return -ENOMEM; tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE, &tx->descs_dma, GFP_KERNEL); if (!tx->descs_cpu) { kfree(tx->buf); tx->buf = NULL; return -ENOMEM; } rx->buf = &tx->buf[tx_size]; rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE; rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE; ag71xx_ring_tx_init(ag); return ag71xx_ring_rx_init(ag); } static void ag71xx_rings_free(struct ag71xx *ag) { struct ag71xx_ring *tx = &ag->tx_ring; struct ag71xx_ring *rx = &ag->rx_ring; int ring_size; ring_size = BIT(tx->order) + BIT(rx->order); if (tx->descs_cpu) dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE, tx->descs_cpu, tx->descs_dma); kfree(tx->buf); tx->descs_cpu = NULL; rx->descs_cpu = NULL; tx->buf = NULL; rx->buf = NULL; } static void ag71xx_rings_cleanup(struct ag71xx *ag) { ag71xx_ring_rx_clean(ag); ag71xx_ring_tx_clean(ag); ag71xx_rings_free(ag); netdev_reset_queue(ag->ndev); } static void ag71xx_hw_init(struct ag71xx *ag) { 
ag71xx_hw_stop(ag); ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR); usleep_range(20, 30); reset_control_assert(ag->mac_reset); msleep(100); reset_control_deassert(ag->mac_reset); msleep(200); ag71xx_hw_setup(ag); ag71xx_dma_reset(ag); } static int ag71xx_hw_enable(struct ag71xx *ag) { int ret; ret = ag71xx_rings_init(ag); if (ret) return ret; napi_enable(&ag->napi); ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma); netif_start_queue(ag->ndev); return 0; } static void ag71xx_hw_disable(struct ag71xx *ag) { netif_stop_queue(ag->ndev); ag71xx_hw_stop(ag); ag71xx_dma_reset(ag); napi_disable(&ag->napi); del_timer_sync(&ag->oom_timer); ag71xx_rings_cleanup(ag); } static int ag71xx_open(struct net_device *ndev) { struct ag71xx *ag = netdev_priv(ndev); unsigned int max_frame_len; int ret; ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0); if (ret) { netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n", ret); return ret; } max_frame_len = ag71xx_max_frame_len(ndev->mtu); ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN); /* setup max frame length */ ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len); ag71xx_hw_set_macaddr(ag, ndev->dev_addr); ret = ag71xx_hw_enable(ag); if (ret) goto err; phylink_start(ag->phylink); return 0; err: ag71xx_rings_cleanup(ag); phylink_disconnect_phy(ag->phylink); return ret; } static int ag71xx_stop(struct net_device *ndev) { struct ag71xx *ag = netdev_priv(ndev); phylink_stop(ag->phylink); phylink_disconnect_phy(ag->phylink); ag71xx_hw_disable(ag); return 0; } static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len) { int i, ring_mask, ndesc, split; struct ag71xx_desc *desc; ring_mask = BIT(ring->order) - 1; ndesc = 0; split = ring->desc_split; if (!split) split = len; while (len > 0) { unsigned int cur_len = len; i = (ring->curr + ndesc) & ring_mask; desc = ag71xx_ring_desc(ring, i); if (!ag71xx_desc_empty(desc)) return -1; if (cur_len > split) { cur_len = split; /* TX will hang if DMA transfers <= 4 bytes, * make sure next segment is more than 4 bytes long. 
*/ if (len <= split + 4) cur_len -= 4; } desc->data = addr; addr += cur_len; len -= cur_len; if (len > 0) cur_len |= DESC_MORE; /* prevent early tx attempt of this descriptor */ if (!ndesc) cur_len |= DESC_EMPTY; desc->ctrl = cur_len; ndesc++; } return ndesc; } static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) { int i, n, ring_min, ring_mask, ring_size; struct ag71xx *ag = netdev_priv(ndev); struct ag71xx_ring *ring; struct ag71xx_desc *desc; dma_addr_t dma_addr; ring = &ag->tx_ring; ring_mask = BIT(ring->order) - 1; ring_size = BIT(ring->order); if (skb->len <= 4) { netif_dbg(ag, tx_err, ndev, "packet len is too small\n"); goto err_drop; } dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); i = ring->curr & ring_mask; desc = ag71xx_ring_desc(ring, i); /* setup descriptor fields */ n = ag71xx_fill_dma_desc(ring, (u32)dma_addr, skb->len & ag->dcfg->desc_pktlen_mask); if (n < 0) goto err_drop_unmap; i = (ring->curr + n - 1) & ring_mask; ring->buf[i].tx.len = skb->len; ring->buf[i].tx.skb = skb; netdev_sent_queue(ndev, skb->len); skb_tx_timestamp(skb); desc->ctrl &= ~DESC_EMPTY; ring->curr += n; /* flush descriptor */ wmb(); ring_min = 2; if (ring->desc_split) ring_min *= AG71XX_TX_RING_DS_PER_PKT; if (ring->curr - ring->dirty >= ring_size - ring_min) { netif_dbg(ag, tx_err, ndev, "tx queue full\n"); netif_stop_queue(ndev); } netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n"); /* enable TX engine */ ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE); return NETDEV_TX_OK; err_drop_unmap: dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE); err_drop: ndev->stats.tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static void ag71xx_oom_timer_handler(struct timer_list *t) { struct ag71xx *ag = from_timer(ag, t, oom_timer); napi_schedule(&ag->napi); } static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue) { struct ag71xx *ag = netdev_priv(ndev); netif_err(ag, tx_err, ndev, "tx timeout\n"); schedule_delayed_work(&ag->restart_work, 1); } static void ag71xx_restart_work_func(struct work_struct *work) { struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work); rtnl_lock(); ag71xx_hw_disable(ag); ag71xx_hw_enable(ag); phylink_stop(ag->phylink); phylink_start(ag->phylink); rtnl_unlock(); } static int ag71xx_rx_packets(struct ag71xx *ag, int limit) { struct net_device *ndev = ag->ndev; int ring_mask, ring_size, done = 0; unsigned int pktlen_mask, offset; struct ag71xx_ring *ring; struct list_head rx_list; struct sk_buff *skb; ring = &ag->rx_ring; pktlen_mask = ag->dcfg->desc_pktlen_mask; offset = ag->rx_buf_offset; ring_mask = BIT(ring->order) - 1; ring_size = BIT(ring->order); netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n", limit, ring->curr, ring->dirty); INIT_LIST_HEAD(&rx_list); while (done < limit) { unsigned int i = ring->curr & ring_mask; struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); int pktlen; int err = 0; if (ag71xx_desc_empty(desc)) break; if ((ring->dirty + ring_size) == ring->curr) { WARN_ONCE(1, "RX out of ring"); break; } ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR); pktlen = desc->ctrl & pktlen_mask; pktlen -= ETH_FCS_LEN; dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr, ag->rx_buf_size, DMA_FROM_DEVICE); ndev->stats.rx_packets++; ndev->stats.rx_bytes += pktlen; skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag)); if (!skb) { skb_free_frag(ring->buf[i].rx.rx_buf); goto next; } 
skb_reserve(skb, offset); skb_put(skb, pktlen); if (err) { ndev->stats.rx_dropped++; kfree_skb(skb); } else { skb->dev = ndev; skb->ip_summed = CHECKSUM_NONE; list_add_tail(&skb->list, &rx_list); } next: ring->buf[i].rx.rx_buf = NULL; done++; ring->curr++; } ag71xx_ring_rx_refill(ag); list_for_each_entry(skb, &rx_list, list) skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb_list(&rx_list); netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n", ring->curr, ring->dirty, done); return done; } static int ag71xx_poll(struct napi_struct *napi, int limit) { struct ag71xx *ag = container_of(napi, struct ag71xx, napi); struct ag71xx_ring *rx_ring = &ag->rx_ring; int rx_ring_size = BIT(rx_ring->order); struct net_device *ndev = ag->ndev; int tx_done, rx_done; u32 status; tx_done = ag71xx_tx_packets(ag, false, limit); netif_dbg(ag, rx_status, ndev, "processing RX ring\n"); rx_done = ag71xx_rx_packets(ag, limit); if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf) goto oom; status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS); if (unlikely(status & RX_STATUS_OF)) { ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF); ndev->stats.rx_fifo_errors++; /* restart RX */ ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE); } if (rx_done < limit) { if (status & RX_STATUS_PR) goto more; status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS); if (status & TX_STATUS_PS) goto more; netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d,limit=%d\n", rx_done, tx_done, limit); napi_complete(napi); /* enable interrupts */ ag71xx_int_enable(ag, AG71XX_INT_POLL); return rx_done; } more: netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n", rx_done, tx_done, limit); return limit; oom: netif_err(ag, rx_err, ndev, "out of memory\n"); mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL); napi_complete(napi); return 0; } static irqreturn_t ag71xx_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct ag71xx *ag; u32 status; ag = netdev_priv(ndev); status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS); if (unlikely(!status)) return IRQ_NONE; if (unlikely(status & AG71XX_INT_ERR)) { if (status & AG71XX_INT_TX_BE) { ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE); netif_err(ag, intr, ndev, "TX BUS error\n"); } if (status & AG71XX_INT_RX_BE) { ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE); netif_err(ag, intr, ndev, "RX BUS error\n"); } } if (likely(status & AG71XX_INT_POLL)) { ag71xx_int_disable(ag, AG71XX_INT_POLL); netif_dbg(ag, intr, ndev, "enable polling mode\n"); napi_schedule(&ag->napi); } return IRQ_HANDLED; } static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu) { struct ag71xx *ag = netdev_priv(ndev); ndev->mtu = new_mtu; ag71xx_wr(ag, AG71XX_REG_MAC_MFL, ag71xx_max_frame_len(ndev->mtu)); return 0; } static const struct net_device_ops ag71xx_netdev_ops = { .ndo_open = ag71xx_open, .ndo_stop = ag71xx_stop, .ndo_start_xmit = ag71xx_hard_start_xmit, .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = ag71xx_tx_timeout, .ndo_change_mtu = ag71xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static const u32 ar71xx_addr_ar7100[] = { 0x19000000, 0x1a000000, }; static int ag71xx_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; const struct ag71xx_dcfg *dcfg; struct net_device *ndev; struct resource *res; int tx_size, err, i; struct ag71xx *ag; if (!np) return -ENODEV; ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag)); if (!ndev) return -ENOMEM; res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EINVAL; dcfg = of_device_get_match_data(&pdev->dev); if (!dcfg) return -EINVAL; ag = netdev_priv(ndev); ag->mac_idx = -1; for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) { if (ar71xx_addr_ar7100[i] == res->start) ag->mac_idx = i; } if (ag->mac_idx < 0) { netif_err(ag, probe, ndev, "unknown mac idx\n"); return -EINVAL; } ag->clk_eth = devm_clk_get(&pdev->dev, "eth"); if (IS_ERR(ag->clk_eth)) { netif_err(ag, probe, ndev, "Failed to get eth clk.\n"); return PTR_ERR(ag->clk_eth); } SET_NETDEV_DEV(ndev, &pdev->dev); ag->pdev = pdev; ag->ndev = ndev; ag->dcfg = dcfg; ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE); memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata)); ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac"); if (IS_ERR(ag->mac_reset)) { netif_err(ag, probe, ndev, "missing mac reset\n"); return PTR_ERR(ag->mac_reset); } ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!ag->mac_base) return -ENOMEM; ndev->irq = platform_get_irq(pdev, 0); err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt, 0x0, dev_name(&pdev->dev), ndev); if (err) { netif_err(ag, probe, ndev, "unable to request IRQ %d\n", ndev->irq); return err; } ndev->netdev_ops = &ag71xx_netdev_ops; ndev->ethtool_ops = &ag71xx_ethtool_ops; INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func); timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0); tx_size = AG71XX_TX_RING_SIZE_DEFAULT; ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT); ndev->min_mtu = 68; ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0); ag->rx_buf_offset = NET_SKB_PAD; if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130)) ag->rx_buf_offset += NET_IP_ALIGN; if (ag71xx_is(ag, AR7100)) { ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT; tx_size *= AG71XX_TX_RING_DS_PER_PKT; } ag->tx_ring.order = ag71xx_ring_size_order(tx_size); ag->stop_desc = dmam_alloc_coherent(&pdev->dev, sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL); if (!ag->stop_desc) return -ENOMEM; ag->stop_desc->data = 0; ag->stop_desc->ctrl = 0; ag->stop_desc->next = (u32)ag->stop_desc_dma; err = of_get_ethdev_address(np, ndev); if (err) { netif_err(ag, probe, ndev, "invalid MAC address, using random address\n"); eth_hw_addr_random(ndev); } err = of_get_phy_mode(np, &ag->phy_if_mode); if (err) { netif_err(ag, probe, ndev, "missing phy-mode property in DT\n"); return err; } netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT); err = clk_prepare_enable(ag->clk_eth); if (err) { netif_err(ag, probe, ndev, "Failed to enable eth clk.\n"); return err; } ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0); ag71xx_hw_init(ag); err = ag71xx_mdio_probe(ag); if (err) goto err_put_clk; platform_set_drvdata(pdev, ndev); err = ag71xx_phylink_setup(ag); if (err) { netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err); goto err_mdio_remove; } err = register_netdev(ndev); if (err) { netif_err(ag, probe, ndev, "unable to register net device\n"); platform_set_drvdata(pdev, NULL); goto err_mdio_remove; } netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n", (unsigned long)ag->mac_base, ndev->irq, phy_modes(ag->phy_if_mode)); return 0; err_mdio_remove: ag71xx_mdio_remove(ag); err_put_clk: clk_disable_unprepare(ag->clk_eth); return err; } static int ag71xx_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ag71xx *ag; if (!ndev) return 0; ag = 
netdev_priv(ndev); unregister_netdev(ndev); ag71xx_mdio_remove(ag); clk_disable_unprepare(ag->clk_eth); platform_set_drvdata(pdev, NULL); return 0; } static const u32 ar71xx_fifo_ar7100[] = { 0x0fff0000, 0x00001fff, 0x00780fff, }; static const u32 ar71xx_fifo_ar9130[] = { 0x0fff0000, 0x00001fff, 0x008001ff, }; static const u32 ar71xx_fifo_ar9330[] = { 0x0010ffff, 0x015500aa, 0x01f00140, }; static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = { .type = AR7100, .fifodata = ar71xx_fifo_ar7100, .max_frame_len = 1540, .desc_pktlen_mask = SZ_4K - 1, .tx_hang_workaround = false, }; static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = { .type = AR7240, .fifodata = ar71xx_fifo_ar7100, .max_frame_len = 1540, .desc_pktlen_mask = SZ_4K - 1, .tx_hang_workaround = true, }; static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = { .type = AR9130, .fifodata = ar71xx_fifo_ar9130, .max_frame_len = 1540, .desc_pktlen_mask = SZ_4K - 1, .tx_hang_workaround = false, }; static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = { .type = AR9330, .fifodata = ar71xx_fifo_ar9330, .max_frame_len = 1540, .desc_pktlen_mask = SZ_4K - 1, .tx_hang_workaround = true, }; static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = { .type = AR9340, .fifodata = ar71xx_fifo_ar9330, .max_frame_len = SZ_16K - 1, .desc_pktlen_mask = SZ_16K - 1, .tx_hang_workaround = true, }; static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = { .type = QCA9530, .fifodata = ar71xx_fifo_ar9330, .max_frame_len = SZ_16K - 1, .desc_pktlen_mask = SZ_16K - 1, .tx_hang_workaround = true, }; static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = { .type = QCA9550, .fifodata = ar71xx_fifo_ar9330, .max_frame_len = 1540, .desc_pktlen_mask = SZ_16K - 1, .tx_hang_workaround = true, }; static const struct of_device_id ag71xx_match[] = { { .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 }, { .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 }, { .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 }, { .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 }, { .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 }, { .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 }, { .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 }, { .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 }, { .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 }, { .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 }, {} }; static struct platform_driver ag71xx_driver = { .probe = ag71xx_probe, .remove = ag71xx_remove, .driver = { .name = "ag71xx", .of_match_table = ag71xx_match, } }; module_platform_driver(ag71xx_driver); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/ethernet/atheros/ag71xx.c
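A minimal standalone sketch of the ring bookkeeping used by the ag71xx TX/RX paths above, assuming only that the ring size is a power of two; the demo_* names are illustrative and not part of the driver. curr and dirty are free-running 32-bit counters, so occupancy is plain unsigned subtraction and a slot index is the counter masked with ring_size - 1, which is how ag71xx_hard_start_xmit decides when to stop the queue.
#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: free-running producer/consumer counters over a
 * power-of-two ring, mirroring ag71xx's curr/dirty handling.
 */
struct demo_ring {
	uint32_t curr;	/* next slot to produce (xmit / packet received) */
	uint32_t dirty;	/* next slot to reclaim (tx complete / rx refill) */
	uint32_t order;	/* ring holds 1 << order descriptors */
};

static uint32_t demo_ring_size(const struct demo_ring *r)
{
	return 1u << r->order;
}

static uint32_t demo_ring_slot(const struct demo_ring *r, uint32_t counter)
{
	/* Same as "counter & ring_mask" in the driver. */
	return counter & (demo_ring_size(r) - 1);
}

static uint32_t demo_ring_in_flight(const struct demo_ring *r)
{
	/* Unsigned subtraction stays correct across 32-bit wraparound,
	 * so the counters never need to be reset.
	 */
	return r->curr - r->dirty;
}

static bool demo_ring_should_stop(const struct demo_ring *r, uint32_t reserve)
{
	/* The driver stops the queue once fewer than 'reserve' free slots
	 * remain (2, or 2 * AG71XX_TX_RING_DS_PER_PKT with split
	 * descriptors).
	 */
	return demo_ring_in_flight(r) >= demo_ring_size(r) - reserve;
}
Because slot indices come from masking rather than from the counters themselves, the counters only ever need to be compared by difference; the WARN_ONCE "RX out of ring" check in ag71xx_rx_packets relies on exactly this property.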
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved. * Copyright(c) 2007 - 2008 Chris Snook <[email protected]> * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. */ #include <linux/atomic.h> #include <linux/crc32.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/hardirq.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/irqflags.h> #include <linux/irqreturn.h> #include <linux/mii.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/pm.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/tcp.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> #include "atl2.h" static const char atl2_driver_name[] = "atl2"; static const struct ethtool_ops atl2_ethtool_ops; MODULE_AUTHOR("Atheros Corporation <[email protected]>, Chris Snook <[email protected]>"); MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver"); MODULE_LICENSE("GPL"); /* * atl2_pci_tbl - PCI Device ID Table */ static const struct pci_device_id atl2_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)}, /* required last entry */ {0,} }; MODULE_DEVICE_TABLE(pci, atl2_pci_tbl); static void atl2_check_options(struct atl2_adapter *adapter); /** * atl2_sw_init - Initialize general software structures (struct atl2_adapter) * @adapter: board private structure to initialize * * atl2_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). */ static int atl2_sw_init(struct atl2_adapter *adapter) { struct atl2_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_id = pdev->subsystem_device; hw->revision_id = pdev->revision; pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); adapter->wol = 0; adapter->ict = 50000; /* ~100ms */ adapter->link_speed = SPEED_0; /* hardware init */ adapter->link_duplex = FULL_DUPLEX; hw->phy_configured = false; hw->preamble_len = 7; hw->ipgt = 0x60; hw->min_ifg = 0x50; hw->ipgr1 = 0x40; hw->ipgr2 = 0x60; hw->retry_buf = 2; hw->max_retry = 0xf; hw->lcol = 0x37; hw->jam_ipg = 7; hw->fc_rxd_hi = 0; hw->fc_rxd_lo = 0; hw->max_frame_size = adapter->netdev->mtu; spin_lock_init(&adapter->stats_lock); set_bit(__ATL2_DOWN, &adapter->flags); return 0; } /** * atl2_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * * The set_multi entry point is called whenever the multicast address * list or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper multicast, * promiscuous mode, and all-multi behavior. 
*/ static void atl2_set_multi(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u32 rctl; u32 hash_value; /* Check for Promiscuous and All Multicast modes */ rctl = ATL2_READ_REG(hw, REG_MAC_CTRL); if (netdev->flags & IFF_PROMISC) { rctl |= MAC_CTRL_PROMIS_EN; } else if (netdev->flags & IFF_ALLMULTI) { rctl |= MAC_CTRL_MC_ALL_EN; rctl &= ~MAC_CTRL_PROMIS_EN; } else rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl); /* clear the old settings from the multicast hash table */ ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ netdev_for_each_mc_addr(ha, netdev) { hash_value = atl2_hash_mc_addr(hw, ha->addr); atl2_hash_set(hw, hash_value); } } static void init_ring_ptrs(struct atl2_adapter *adapter) { /* Read / Write Ptr Initialize: */ adapter->txd_write_ptr = 0; atomic_set(&adapter->txd_read_ptr, 0); adapter->rxd_read_ptr = 0; adapter->rxd_write_ptr = 0; atomic_set(&adapter->txs_write_ptr, 0); adapter->txs_next_clear = 0; } /** * atl2_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * * Configure the Tx /Rx unit of the MAC after a reset. */ static int atl2_configure(struct atl2_adapter *adapter) { struct atl2_hw *hw = &adapter->hw; u32 value; /* clear interrupt status */ ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff); /* set MAC Address */ value = (((u32)hw->mac_addr[2]) << 24) | (((u32)hw->mac_addr[3]) << 16) | (((u32)hw->mac_addr[4]) << 8) | (((u32)hw->mac_addr[5])); ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value); value = (((u32)hw->mac_addr[0]) << 8) | (((u32)hw->mac_addr[1])); ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value); /* HI base address */ ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, (u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32)); /* LO base address */ ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO, (u32)(adapter->txd_dma & 0x00000000ffffffffULL)); ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO, (u32)(adapter->txs_dma & 0x00000000ffffffffULL)); ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO, (u32)(adapter->rxd_dma & 0x00000000ffffffffULL)); /* element count */ ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4)); ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size); ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size); /* config Internal SRAM */ /* ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end); ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end); */ /* config IPG/IFG */ value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) << MAC_IPG_IFG_IPGT_SHIFT) | (((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) << MAC_IPG_IFG_MIFG_SHIFT) | (((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) << MAC_IPG_IFG_IPGR1_SHIFT)| (((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) << MAC_IPG_IFG_IPGR2_SHIFT); ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value); /* config Half-Duplex Control */ value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | (((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | (((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value); /* set Interrupt Moderator Timer */ ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt); ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN); /* set 
Interrupt Clear Timer */ ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict); /* set MTU */ ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); /* 1590 */ ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177); /* flow control */ ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi); ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo); /* Init mailbox */ ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr); ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr); /* enable DMA read/write */ ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN); ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN); value = ATL2_READ_REG(&adapter->hw, REG_ISR); if ((value & ISR_PHY_LINKDOWN) != 0) value = 1; /* config failed */ else value = 0; /* clear all interrupt status */ ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff); ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0); return value; } /** * atl2_setup_ring_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int size; u8 offset = 0; /* real ring DMA buffer */ adapter->ring_size = size = adapter->txd_ring_size * 1 + 7 + /* dword align */ adapter->txs_ring_size * 4 + 7 + /* dword align */ adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */ adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev, size, &adapter->ring_dma, GFP_KERNEL); if (!adapter->ring_vir_addr) return -ENOMEM; /* Init TXD Ring */ adapter->txd_dma = adapter->ring_dma ; offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0; adapter->txd_dma += offset; adapter->txd_ring = adapter->ring_vir_addr + offset; /* Init TXS Ring */ adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size; offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0; adapter->txs_dma += offset; adapter->txs_ring = (struct tx_pkt_status *) (((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset)); /* Init RXD Ring */ adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4; offset = (adapter->rxd_dma & 127) ? 
(128 - (adapter->rxd_dma & 127)) : 0; if (offset > 7) offset -= 8; else offset += (128 - 8); adapter->rxd_dma += offset; adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) + (adapter->txs_ring_size * 4 + offset)); /* * Read / Write Ptr Initialize: * init_ring_ptrs(adapter); */ return 0; } /** * atl2_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ static inline void atl2_irq_enable(struct atl2_adapter *adapter) { ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); ATL2_WRITE_FLUSH(&adapter->hw); } /** * atl2_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ static inline void atl2_irq_disable(struct atl2_adapter *adapter) { ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0); ATL2_WRITE_FLUSH(&adapter->hw); synchronize_irq(adapter->pdev->irq); } static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl) { if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ *ctrl |= MAC_CTRL_RMV_VLAN; } else { /* disable VLAN tag insert/strip */ *ctrl &= ~MAC_CTRL_RMV_VLAN; } } static void atl2_vlan_mode(struct net_device *netdev, netdev_features_t features) { struct atl2_adapter *adapter = netdev_priv(netdev); u32 ctrl; atl2_irq_disable(adapter); ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL); __atl2_vlan_mode(features, &ctrl); ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl); atl2_irq_enable(adapter); } static void atl2_restore_vlan(struct atl2_adapter *adapter) { atl2_vlan_mode(adapter->netdev, adapter->netdev->features); } static netdev_features_t atl2_fix_features(struct net_device *netdev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } static int atl2_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_CTAG_RX) atl2_vlan_mode(netdev, features); return 0; } static void atl2_intr_rx(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct rx_desc *rxd; struct sk_buff *skb; do { rxd = adapter->rxd_ring+adapter->rxd_write_ptr; if (!rxd->status.update) break; /* end of tx */ /* clear this flag at once */ rxd->status.update = 0; if (rxd->status.ok && rxd->status.pkt_size >= 60) { int rx_size = (int)(rxd->status.pkt_size - 4); /* alloc new buffer */ skb = netdev_alloc_skb_ip_align(netdev, rx_size); if (NULL == skb) { /* * Check that some rx space is free. If not, * free one and mark stats->rx_dropped++. 
*/ netdev->stats.rx_dropped++; break; } memcpy(skb->data, rxd->packet, rx_size); skb_put(skb, rx_size); skb->protocol = eth_type_trans(skb, netdev); if (rxd->status.vlan) { u16 vlan_tag = (rxd->status.vtag>>4) | ((rxd->status.vtag&7) << 13) | ((rxd->status.vtag&8) << 9); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } netif_rx(skb); netdev->stats.rx_bytes += rx_size; netdev->stats.rx_packets++; } else { netdev->stats.rx_errors++; if (rxd->status.ok && rxd->status.pkt_size <= 60) netdev->stats.rx_length_errors++; if (rxd->status.mcast) netdev->stats.multicast++; if (rxd->status.crc) netdev->stats.rx_crc_errors++; if (rxd->status.align) netdev->stats.rx_frame_errors++; } /* advance write ptr */ if (++adapter->rxd_write_ptr == adapter->rxd_ring_size) adapter->rxd_write_ptr = 0; } while (1); /* update mailbox? */ adapter->rxd_read_ptr = adapter->rxd_write_ptr; ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr); } static void atl2_intr_tx(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; u32 txd_read_ptr; u32 txs_write_ptr; struct tx_pkt_status *txs; struct tx_pkt_header *txph; int free_hole = 0; do { txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr); txs = adapter->txs_ring + txs_write_ptr; if (!txs->update) break; /* tx stop here */ free_hole = 1; txs->update = 0; if (++txs_write_ptr == adapter->txs_ring_size) txs_write_ptr = 0; atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr); txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr); txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + txd_read_ptr); if (txph->pkt_size != txs->pkt_size) { struct tx_pkt_status *old_txs = txs; printk(KERN_WARNING "%s: txs packet size not consistent with txd" " txd_:0x%08x, txs_:0x%08x!\n", adapter->netdev->name, *(u32 *)txph, *(u32 *)txs); printk(KERN_WARNING "txd read ptr: 0x%x\n", txd_read_ptr); txs = adapter->txs_ring + txs_write_ptr; printk(KERN_WARNING "txs-behind:0x%08x\n", *(u32 *)txs); if (txs_write_ptr < 2) { txs = adapter->txs_ring + (adapter->txs_ring_size + txs_write_ptr - 2); } else { txs = adapter->txs_ring + (txs_write_ptr - 2); } printk(KERN_WARNING "txs-before:0x%08x\n", *(u32 *)txs); txs = old_txs; } /* 4for TPH */ txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3); if (txd_read_ptr >= adapter->txd_ring_size) txd_read_ptr -= adapter->txd_ring_size; atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr); /* tx statistics: */ if (txs->ok) { netdev->stats.tx_bytes += txs->pkt_size; netdev->stats.tx_packets++; } else netdev->stats.tx_errors++; if (txs->defer) netdev->stats.collisions++; if (txs->abort_col) netdev->stats.tx_aborted_errors++; if (txs->late_col) netdev->stats.tx_window_errors++; if (txs->underrun) netdev->stats.tx_fifo_errors++; } while (1); if (free_hole) { if (netif_queue_stopped(adapter->netdev) && netif_carrier_ok(adapter->netdev)) netif_wake_queue(adapter->netdev); } } static void atl2_check_for_link(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; u16 phy_data = 0; spin_lock(&adapter->stats_lock); atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); spin_unlock(&adapter->stats_lock); /* notify upper layer link down ASAP */ if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ if (netif_carrier_ok(netdev)) { /* old link state: Up */ printk(KERN_INFO "%s: %s NIC Link is Down\n", atl2_driver_name, netdev->name); adapter->link_speed = SPEED_0; netif_carrier_off(netdev); netif_stop_queue(netdev); } } 
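/* Carrier has been dropped synchronously above (we are called from
 * atl2_intr, i.e. interrupt context); the full speed/duplex re-check
 * and MAC reconfiguration are deferred to atl2_link_chg_task.
 */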
schedule_work(&adapter->link_chg_task); } static inline void atl2_clear_phy_int(struct atl2_adapter *adapter) { u16 phy_data; spin_lock(&adapter->stats_lock); atl2_read_phy_reg(&adapter->hw, 19, &phy_data); spin_unlock(&adapter->stats_lock); } /** * atl2_intr - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure */ static irqreturn_t atl2_intr(int irq, void *data) { struct atl2_adapter *adapter = netdev_priv(data); struct atl2_hw *hw = &adapter->hw; u32 status; status = ATL2_READ_REG(hw, REG_ISR); if (0 == status) return IRQ_NONE; /* link event */ if (status & ISR_PHY) atl2_clear_phy_int(adapter); /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); /* check if PCIE PHY Link down */ if (status & ISR_PHY_LINKDOWN) { if (netif_running(adapter->netdev)) { /* reset MAC */ ATL2_WRITE_REG(hw, REG_ISR, 0); ATL2_WRITE_REG(hw, REG_IMR, 0); ATL2_WRITE_FLUSH(hw); schedule_work(&adapter->reset_task); return IRQ_HANDLED; } } /* check if DMA read/write error? */ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { ATL2_WRITE_REG(hw, REG_ISR, 0); ATL2_WRITE_REG(hw, REG_IMR, 0); ATL2_WRITE_FLUSH(hw); schedule_work(&adapter->reset_task); return IRQ_HANDLED; } /* link event */ if (status & (ISR_PHY | ISR_MANUAL)) { adapter->netdev->stats.tx_carrier_errors++; atl2_check_for_link(adapter); } /* transmit event */ if (status & ISR_TX_EVENT) atl2_intr_tx(adapter); /* rx exception */ if (status & ISR_RX_EVENT) atl2_intr_rx(adapter); /* re-enable Interrupt */ ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0); return IRQ_HANDLED; } static int atl2_request_irq(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; int flags, err = 0; flags = IRQF_SHARED; adapter->have_msi = true; err = pci_enable_msi(adapter->pdev); if (err) adapter->have_msi = false; if (adapter->have_msi) flags &= ~IRQF_SHARED; return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name, netdev); } /** * atl2_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * * Free all transmit software resources */ static void atl2_free_ring_resources(struct atl2_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; dma_free_coherent(&pdev->dev, adapter->ring_size, adapter->ring_vir_addr, adapter->ring_dma); } /** * atl2_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. 
*/ static int atl2_open(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); int err; u32 val; /* disallow open during test */ if (test_bit(__ATL2_TESTING, &adapter->flags)) return -EBUSY; /* allocate transmit descriptors */ err = atl2_setup_ring_resources(adapter); if (err) return err; err = atl2_init_hw(&adapter->hw); if (err) { err = -EIO; goto err_init_hw; } /* hardware has been reset, we need to reload some things */ atl2_set_multi(netdev); init_ring_ptrs(adapter); atl2_restore_vlan(adapter); if (atl2_configure(adapter)) { err = -EIO; goto err_config; } err = atl2_request_irq(adapter); if (err) goto err_req_irq; clear_bit(__ATL2_DOWN, &adapter->flags); mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4*HZ)); val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL); ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val | MASTER_CTRL_MANUAL_INT); atl2_irq_enable(adapter); return 0; err_init_hw: err_req_irq: err_config: atl2_free_ring_resources(adapter); atl2_reset_hw(&adapter->hw); return err; } static void atl2_down(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__ATL2_DOWN, &adapter->flags); netif_tx_disable(netdev); /* reset MAC to disable all RX/TX */ atl2_reset_hw(&adapter->hw); msleep(1); atl2_irq_disable(adapter); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_config_timer); clear_bit(0, &adapter->cfg_phy); netif_carrier_off(netdev); adapter->link_speed = SPEED_0; adapter->link_duplex = -1; } static void atl2_free_irq(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; free_irq(adapter->pdev->irq, netdev); #ifdef CONFIG_PCI_MSI if (adapter->have_msi) pci_disable_msi(adapter->pdev); #endif } /** * atl2_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. */ static int atl2_close(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags)); atl2_down(adapter); atl2_free_irq(adapter); atl2_free_ring_resources(adapter); return 0; } static inline int TxsFreeUnit(struct atl2_adapter *adapter) { u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr); return (adapter->txs_next_clear >= txs_write_ptr) ? (int) (adapter->txs_ring_size - adapter->txs_next_clear + txs_write_ptr - 1) : (int) (txs_write_ptr - adapter->txs_next_clear - 1); } static inline int TxdFreeBytes(struct atl2_adapter *adapter) { u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr); return (adapter->txd_write_ptr >= txd_read_ptr) ? 
(int) (adapter->txd_ring_size - adapter->txd_write_ptr + txd_read_ptr - 1) : (int) (txd_read_ptr - adapter->txd_write_ptr - 1); } static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); struct tx_pkt_header *txph; u32 offset, copy_len; int txs_unused; int txbuf_unused; if (test_bit(__ATL2_DOWN, &adapter->flags)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (unlikely(skb->len <= 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } txs_unused = TxsFreeUnit(adapter); txbuf_unused = TxdFreeBytes(adapter); if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused || txs_unused < 1) { /* not enough resources */ netif_stop_queue(netdev); return NETDEV_TX_BUSY; } offset = adapter->txd_write_ptr; txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset); *(u32 *)txph = 0; txph->pkt_size = skb->len; offset += 4; if (offset >= adapter->txd_ring_size) offset -= adapter->txd_ring_size; copy_len = adapter->txd_ring_size - offset; if (copy_len >= skb->len) { memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len); offset += ((u32)(skb->len + 3) & ~3); } else { memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len); memcpy((u8 *)adapter->txd_ring, skb->data+copy_len, skb->len-copy_len); offset = ((u32)(skb->len-copy_len + 3) & ~3); } #ifdef NETIF_F_HW_VLAN_CTAG_TX if (skb_vlan_tag_present(skb)) { u16 vlan_tag = skb_vlan_tag_get(skb); vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | ((vlan_tag >> 9) & 0x8); txph->ins_vlan = 1; txph->vlan = vlan_tag; } #endif if (offset >= adapter->txd_ring_size) offset -= adapter->txd_ring_size; adapter->txd_write_ptr = offset; /* clear txs before send */ adapter->txs_ring[adapter->txs_next_clear].update = 0; if (++adapter->txs_next_clear == adapter->txs_ring_size) adapter->txs_next_clear = 0; ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX, (adapter->txd_write_ptr >> 2)); dev_consume_skb_any(skb); return NETDEV_TX_OK; } /** * atl2_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure */ static int atl2_change_mtu(struct net_device *netdev, int new_mtu) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; /* set MTU */ netdev->mtu = new_mtu; hw->max_frame_size = new_mtu; ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); return 0; } /** * atl2_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure */ static int atl2_set_mac(struct net_device *netdev, void *p) { struct atl2_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (netif_running(netdev)) return -EBUSY; eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl2_set_mac_addr(&adapter->hw); return 0; } static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct atl2_adapter *adapter = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(ifr); unsigned long flags; switch (cmd) { case SIOCGMIIPHY: data->phy_id = 0; break; case SIOCGMIIREG: spin_lock_irqsave(&adapter->stats_lock, flags); if (atl2_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, &data->val_out)) { spin_unlock_irqrestore(&adapter->stats_lock, flags); return -EIO; } 
spin_unlock_irqrestore(&adapter->stats_lock, flags); break; case SIOCSMIIREG: if (data->reg_num & ~(0x1F)) return -EFAULT; spin_lock_irqsave(&adapter->stats_lock, flags); if (atl2_write_phy_reg(&adapter->hw, data->reg_num, data->val_in)) { spin_unlock_irqrestore(&adapter->stats_lock, flags); return -EIO; } spin_unlock_irqrestore(&adapter->stats_lock, flags); break; default: return -EOPNOTSUPP; } return 0; } static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return atl2_mii_ioctl(netdev, ifr, cmd); #ifdef ETHTOOL_OPS_COMPAT case SIOCETHTOOL: return ethtool_ioctl(ifr); #endif default: return -EOPNOTSUPP; } } /** * atl2_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: index of the hanging transmit queue */ static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct atl2_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ schedule_work(&adapter->reset_task); } /** * atl2_watchdog - Timer Call-back * @t: timer list containing a pointer to netdev cast into an unsigned long */ static void atl2_watchdog(struct timer_list *t) { struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer); if (!test_bit(__ATL2_DOWN, &adapter->flags)) { u32 drop_rxd, drop_rxs; unsigned long flags; spin_lock_irqsave(&adapter->stats_lock, flags); drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV); drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV); spin_unlock_irqrestore(&adapter->stats_lock, flags); adapter->netdev->stats.rx_over_errors += drop_rxd + drop_rxs; /* Reset the timer */ mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4 * HZ)); } } /** * atl2_phy_config - Timer Call-back * @t: timer list containing a pointer to netdev cast into an unsigned long */ static void atl2_phy_config(struct timer_list *t) { struct atl2_adapter *adapter = from_timer(adapter, t, phy_config_timer); struct atl2_hw *hw = &adapter->hw; unsigned long flags; spin_lock_irqsave(&adapter->stats_lock, flags); atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); spin_unlock_irqrestore(&adapter->stats_lock, flags); clear_bit(0, &adapter->cfg_phy); } static int atl2_up(struct atl2_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err = 0; u32 val; /* hardware has been reset, we need to reload some things */ err = atl2_init_hw(&adapter->hw); if (err) { err = -EIO; return err; } atl2_set_multi(netdev); init_ring_ptrs(adapter); atl2_restore_vlan(adapter); if (atl2_configure(adapter)) { err = -EIO; goto err_up; } clear_bit(__ATL2_DOWN, &adapter->flags); val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL); ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val | MASTER_CTRL_MANUAL_INT); atl2_irq_enable(adapter); err_up: return err; } static void atl2_reinit_locked(struct atl2_adapter *adapter) { while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags)) msleep(1); atl2_down(adapter); atl2_up(adapter); clear_bit(__ATL2_RESETTING, &adapter->flags); } static void atl2_reset_task(struct work_struct *work) { struct atl2_adapter *adapter; adapter = container_of(work, struct atl2_adapter, reset_task); atl2_reinit_locked(adapter); } static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter) { u32 value; struct atl2_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; /* Config MAC CTRL Register */ 
value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY; /* duplex */ if (FULL_DUPLEX == adapter->link_duplex) value |= MAC_CTRL_DUPLX; /* flow control */ value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); /* PAD & CRC */ value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); /* preamble length */ value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); /* vlan */ __atl2_vlan_mode(netdev->features, &value); /* filter mode */ value |= MAC_CTRL_BC_EN; if (netdev->flags & IFF_PROMISC) value |= MAC_CTRL_PROMIS_EN; else if (netdev->flags & IFF_ALLMULTI) value |= MAC_CTRL_MC_ALL_EN; /* half retry buffer */ value |= (((u32)(adapter->hw.retry_buf & MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT); ATL2_WRITE_REG(hw, REG_MAC_CTRL, value); } static int atl2_check_link(struct atl2_adapter *adapter) { struct atl2_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; int ret_val; u16 speed, duplex, phy_data; int reconfig = 0; /* MII_BMSR must read twise */ atl2_read_phy_reg(hw, MII_BMSR, &phy_data); atl2_read_phy_reg(hw, MII_BMSR, &phy_data); if (!(phy_data&BMSR_LSTATUS)) { /* link down */ if (netif_carrier_ok(netdev)) { /* old link state: Up */ u32 value; /* disable rx */ value = ATL2_READ_REG(hw, REG_MAC_CTRL); value &= ~MAC_CTRL_RX_EN; ATL2_WRITE_REG(hw, REG_MAC_CTRL, value); adapter->link_speed = SPEED_0; netif_carrier_off(netdev); netif_stop_queue(netdev); } return 0; } /* Link Up */ ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex); if (ret_val) return ret_val; switch (hw->MediaType) { case MEDIA_TYPE_100M_FULL: if (speed != SPEED_100 || duplex != FULL_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_100M_HALF: if (speed != SPEED_100 || duplex != HALF_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_10M_FULL: if (speed != SPEED_10 || duplex != FULL_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_10M_HALF: if (speed != SPEED_10 || duplex != HALF_DUPLEX) reconfig = 1; break; } /* link result is our setting */ if (reconfig == 0) { if (adapter->link_speed != speed || adapter->link_duplex != duplex) { adapter->link_speed = speed; adapter->link_duplex = duplex; atl2_setup_mac_ctrl(adapter); printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n", atl2_driver_name, netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? 
"Full Duplex" : "Half Duplex"); } if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ netif_carrier_on(netdev); netif_wake_queue(netdev); } return 0; } /* change original link status */ if (netif_carrier_ok(netdev)) { u32 value; /* disable rx */ value = ATL2_READ_REG(hw, REG_MAC_CTRL); value &= ~MAC_CTRL_RX_EN; ATL2_WRITE_REG(hw, REG_MAC_CTRL, value); adapter->link_speed = SPEED_0; netif_carrier_off(netdev); netif_stop_queue(netdev); } /* auto-neg, insert timer to re-config phy * (if interval smaller than 5 seconds, something strange) */ if (!test_bit(__ATL2_DOWN, &adapter->flags)) { if (!test_and_set_bit(0, &adapter->cfg_phy)) mod_timer(&adapter->phy_config_timer, round_jiffies(jiffies + 5 * HZ)); } return 0; } /** * atl2_link_chg_task - deal with link change event Out of interrupt context * @work: pointer to work struct with private info */ static void atl2_link_chg_task(struct work_struct *work) { struct atl2_adapter *adapter; unsigned long flags; adapter = container_of(work, struct atl2_adapter, link_chg_task); spin_lock_irqsave(&adapter->stats_lock, flags); atl2_check_link(adapter); spin_unlock_irqrestore(&adapter->stats_lock, flags); } static void atl2_setup_pcicmd(struct pci_dev *pdev) { u16 cmd; pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_INTX_DISABLE) cmd &= ~PCI_COMMAND_INTX_DISABLE; if (cmd & PCI_COMMAND_IO) cmd &= ~PCI_COMMAND_IO; if (0 == (cmd & PCI_COMMAND_MEMORY)) cmd |= PCI_COMMAND_MEMORY; if (0 == (cmd & PCI_COMMAND_MASTER)) cmd |= PCI_COMMAND_MASTER; pci_write_config_word(pdev, PCI_COMMAND, cmd); /* * some motherboards BIOS(PXE/EFI) driver may set PME * while they transfer control to OS (Windows/Linux) * so we should clear this bit before NIC work normally */ pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0); } #ifdef CONFIG_NET_POLL_CONTROLLER static void atl2_poll_controller(struct net_device *netdev) { disable_irq(netdev->irq); atl2_intr(netdev->irq, netdev); enable_irq(netdev->irq); } #endif static const struct net_device_ops atl2_netdev_ops = { .ndo_open = atl2_open, .ndo_stop = atl2_close, .ndo_start_xmit = atl2_xmit_frame, .ndo_set_rx_mode = atl2_set_multi, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = atl2_set_mac, .ndo_change_mtu = atl2_change_mtu, .ndo_fix_features = atl2_fix_features, .ndo_set_features = atl2_set_features, .ndo_eth_ioctl = atl2_ioctl, .ndo_tx_timeout = atl2_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl2_poll_controller, #endif }; /** * atl2_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl2_pci_tbl * * Returns 0 on success, negative on failure * * atl2_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. */ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl2_adapter *adapter; static int cards_found = 0; unsigned long mmio_start; int mmio_len; int err; err = pci_enable_device(pdev); if (err) return err; /* * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA * until the kernel has the proper infrastructure to support 64-bit DMA * on these devices. 
*/ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) && dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n"); err = -EIO; goto err_dma; } /* Mark all PCI regions associated with PCI device * pdev as being reserved by owner atl2_driver_name */ err = pci_request_regions(pdev, atl2_driver_name); if (err) goto err_pci_reg; /* Enables bus-mastering on the device and calls * pcibios_set_master to do the needed arch specific settings */ pci_set_master(pdev); netdev = alloc_etherdev(sizeof(struct atl2_adapter)); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.back = adapter; mmio_start = pci_resource_start(pdev, 0x0); mmio_len = pci_resource_len(pdev, 0x0); adapter->hw.mem_rang = (u32)mmio_len; adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); if (!adapter->hw.hw_addr) { err = -EIO; goto err_ioremap; } atl2_setup_pcicmd(pdev); netdev->netdev_ops = &atl2_netdev_ops; netdev->ethtool_ops = &atl2_ethtool_ops; netdev->watchdog_timeo = 5 * HZ; netdev->min_mtu = 40; netdev->max_mtu = ETH_DATA_LEN + VLAN_HLEN; strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len; adapter->bd_number = cards_found; adapter->pci_using_64 = false; /* setup the private structure */ err = atl2_sw_init(adapter); if (err) goto err_sw_init; netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX; netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); /* Init PHY as early as possible due to power saving issue */ atl2_phy_init(&adapter->hw); /* reset the controller to * put the device in a known good starting state */ if (atl2_reset_hw(&adapter->hw)) { err = -EIO; goto err_reset; } /* copy the MAC address out of the EEPROM */ atl2_read_mac_addr(&adapter->hw); eth_hw_addr_set(netdev, adapter->hw.mac_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { err = -EIO; goto err_eeprom; } atl2_check_options(adapter); timer_setup(&adapter->watchdog_timer, atl2_watchdog, 0); timer_setup(&adapter->phy_config_timer, atl2_phy_config, 0); INIT_WORK(&adapter->reset_task, atl2_reset_task); INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task); strcpy(netdev->name, "eth%d"); /* ?? */ err = register_netdev(netdev); if (err) goto err_register; /* assume we have no link for now */ netif_carrier_off(netdev); netif_stop_queue(netdev); cards_found++; return 0; err_reset: err_register: err_sw_init: err_eeprom: iounmap(adapter->hw.hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * atl2_remove - Device Removal Routine * @pdev: PCI device information struct * * atl2_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. 
*/ /* FIXME: write the original MAC address back in case it was changed from a * BIOS-set value, as in atl1 -- CHS */ static void atl2_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl2_adapter *adapter = netdev_priv(netdev); /* flush_scheduled work may reschedule our watchdog task, so * explicitly disable watchdog tasks from being rescheduled */ set_bit(__ATL2_DOWN, &adapter->flags); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_config_timer); cancel_work_sync(&adapter->reset_task); cancel_work_sync(&adapter->link_chg_task); unregister_netdev(netdev); atl2_force_ps(&adapter->hw); iounmap(adapter->hw.hw_addr); pci_release_regions(pdev); free_netdev(netdev); pci_disable_device(pdev); } static int atl2_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; u16 speed, duplex; u32 ctrl = 0; u32 wufc = adapter->wol; #ifdef CONFIG_PM int retval = 0; #endif netif_device_detach(netdev); if (netif_running(netdev)) { WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags)); atl2_down(adapter); } #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) return retval; #endif atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl); atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl); if (ctrl & BMSR_LSTATUS) wufc &= ~ATLX_WUFC_LNKC; if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) { u32 ret_val; /* get current link speed & duplex */ ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex); if (ret_val) { printk(KERN_DEBUG "%s: get speed&duplex error while suspend\n", atl2_driver_name); goto wol_dis; } ctrl = 0; /* turn on magic packet wol */ if (wufc & ATLX_WUFC_MAG) ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); /* ignore Link Chg event when Link is up */ ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl); /* Config MAC CTRL Register */ ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY; if (FULL_DUPLEX == adapter->link_duplex) ctrl |= MAC_CTRL_DUPLX; ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); ctrl |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); ctrl |= (((u32)(adapter->hw.retry_buf & MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT); if (wufc & ATLX_WUFC_MAG) { /* magic packet maybe Broadcast&multicast&Unicast */ ctrl |= MAC_CTRL_BC_EN; } ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl); /* pcie patch */ ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); goto suspend_exit; } if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) { /* link is down, so only LINK CHG WOL event enable */ ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl); ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0); /* pcie patch */ ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); hw->phy_configured = false; /* re-init PHY when resume */ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); goto suspend_exit; } wol_dis: /* WOL disabled */ ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0); /* pcie patch */ ctrl = ATL2_READ_REG(hw, 
REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); atl2_force_ps(hw); hw->phy_configured = false; /* re-init PHY when resume */ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); suspend_exit: if (netif_running(netdev)) atl2_free_irq(adapter); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } #ifdef CONFIG_PM static int atl2_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl2_adapter *adapter = netdev_priv(netdev); u32 err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); err = pci_enable_device(pdev); if (err) { printk(KERN_ERR "atl2: Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); if (netif_running(netdev)) { err = atl2_request_irq(adapter); if (err) return err; } atl2_reset_hw(&adapter->hw); if (netif_running(netdev)) atl2_up(adapter); netif_device_attach(netdev); return 0; } #endif static void atl2_shutdown(struct pci_dev *pdev) { atl2_suspend(pdev, PMSG_SUSPEND); } static struct pci_driver atl2_driver = { .name = atl2_driver_name, .id_table = atl2_pci_tbl, .probe = atl2_probe, .remove = atl2_remove, /* Power Management Hooks */ .suspend = atl2_suspend, #ifdef CONFIG_PM .resume = atl2_resume, #endif .shutdown = atl2_shutdown, }; module_pci_driver(atl2_driver); static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) { struct atl2_adapter *adapter = hw->back; pci_read_config_word(adapter->pdev, reg, value); } static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) { struct atl2_adapter *adapter = hw->back; pci_write_config_word(adapter->pdev, reg, *value); } static int atl2_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; u32 supported, advertising; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP); advertising = ADVERTISED_TP; advertising |= ADVERTISED_Autoneg; advertising |= hw->autoneg_advertised; cmd->base.port = PORT_TP; cmd->base.phy_address = 0; if (adapter->link_speed != SPEED_0) { cmd->base.speed = adapter->link_speed; if (adapter->link_duplex == FULL_DUPLEX) cmd->base.duplex = DUPLEX_FULL; else cmd->base.duplex = DUPLEX_HALF; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } cmd->base.autoneg = AUTONEG_ENABLE; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int atl2_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; u32 advertising; ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags)) msleep(1); if (cmd->base.autoneg == AUTONEG_ENABLE) { #define MY_ADV_MASK (ADVERTISE_10_HALF | \ ADVERTISE_10_FULL | \ ADVERTISE_100_HALF| \ ADVERTISE_100_FULL) if ((advertising 
& MY_ADV_MASK) == MY_ADV_MASK) { hw->MediaType = MEDIA_TYPE_AUTO_SENSOR; hw->autoneg_advertised = MY_ADV_MASK; } else if ((advertising & MY_ADV_MASK) == ADVERTISE_100_FULL) { hw->MediaType = MEDIA_TYPE_100M_FULL; hw->autoneg_advertised = ADVERTISE_100_FULL; } else if ((advertising & MY_ADV_MASK) == ADVERTISE_100_HALF) { hw->MediaType = MEDIA_TYPE_100M_HALF; hw->autoneg_advertised = ADVERTISE_100_HALF; } else if ((advertising & MY_ADV_MASK) == ADVERTISE_10_FULL) { hw->MediaType = MEDIA_TYPE_10M_FULL; hw->autoneg_advertised = ADVERTISE_10_FULL; } else if ((advertising & MY_ADV_MASK) == ADVERTISE_10_HALF) { hw->MediaType = MEDIA_TYPE_10M_HALF; hw->autoneg_advertised = ADVERTISE_10_HALF; } else { clear_bit(__ATL2_RESETTING, &adapter->flags); return -EINVAL; } advertising = hw->autoneg_advertised | ADVERTISED_TP | ADVERTISED_Autoneg; } else { clear_bit(__ATL2_RESETTING, &adapter->flags); return -EINVAL; } /* reset the link */ if (netif_running(adapter->netdev)) { atl2_down(adapter); atl2_up(adapter); } else atl2_reset_hw(&adapter->hw); clear_bit(__ATL2_RESETTING, &adapter->flags); return 0; } static u32 atl2_get_msglevel(struct net_device *netdev) { return 0; } /* * It's sane for this to be empty, but we might want to take advantage of this. */ static void atl2_set_msglevel(struct net_device *netdev, u32 data) { } static int atl2_get_regs_len(struct net_device *netdev) { #define ATL2_REGS_LEN 42 return sizeof(u32) * ATL2_REGS_LEN; } static void atl2_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; u32 *regs_buff = p; u16 phy_data; memset(p, 0, sizeof(u32) * ATL2_REGS_LEN); regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP); regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG); regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL); regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL); regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT); regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE); regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER); regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS); regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL); regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK); regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL); regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG); regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4); regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE); regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4); regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); regs_buff[20] = ATL2_READ_REG(hw, REG_MTU); regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL); regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END); regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI); regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO); regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE); regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO); regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE); regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO); regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM); regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR); regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH); regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW); regs_buff[33] = 
ATL2_READ_REG(hw, REG_PAUSE_ON_TH); regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH); regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX); regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX); regs_buff[38] = ATL2_READ_REG(hw, REG_ISR); regs_buff[39] = ATL2_READ_REG(hw, REG_IMR); atl2_read_phy_reg(hw, MII_BMCR, &phy_data); regs_buff[40] = (u32)phy_data; atl2_read_phy_reg(hw, MII_BMSR, &phy_data); regs_buff[41] = (u32)phy_data; } static int atl2_get_eeprom_len(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); if (!atl2_check_eeprom_exist(&adapter->hw)) return 512; else return 0; } static int atl2_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; u32 *eeprom_buff; int first_dword, last_dword; int ret_val = 0; int i; if (eeprom->len == 0) return -EINVAL; if (atl2_check_eeprom_exist(hw)) return -EINVAL; eeprom->magic = hw->vendor_id | (hw->device_id << 16); first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc_array(last_dword - first_dword + 1, sizeof(u32), GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; for (i = first_dword; i < last_dword; i++) { if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) { ret_val = -EIO; goto free; } } memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), eeprom->len); free: kfree(eeprom_buff); return ret_val; } static int atl2_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; u32 *eeprom_buff; u32 *ptr; int max_len, first_dword, last_dword, ret_val = 0; int i; if (eeprom->len == 0) return -EOPNOTSUPP; if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) return -EFAULT; max_len = 512; first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc(max_len, GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; ptr = eeprom_buff; if (eeprom->offset & 3) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) { ret_val = -EIO; goto out; } ptr++; } if (((eeprom->offset + eeprom->len) & 3)) { /* * need read/modify/write of last changed EEPROM word * only the first byte of the word is being modified */ if (!atl2_read_eeprom(hw, last_dword * 4, &(eeprom_buff[last_dword - first_dword]))) { ret_val = -EIO; goto out; } } /* Device's eeprom is always little-endian, word addressable */ memcpy(ptr, bytes, eeprom->len); for (i = 0; i < last_dword - first_dword + 1; i++) { if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) { ret_val = -EIO; goto out; } } out: kfree(eeprom_buff); return ret_val; } static void atl2_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct atl2_adapter *adapter = netdev_priv(netdev); strscpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version)); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); } static void atl2_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl2_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_MAGIC; wol->wolopts = 0; if (adapter->wol & ATLX_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & ATLX_WUFC_MC) wol->wolopts |= WAKE_MCAST; if 
(adapter->wol & ATLX_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & ATLX_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; if (adapter->wol & ATLX_WUFC_LNKC) wol->wolopts |= WAKE_PHY; } static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl2_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) return -EOPNOTSUPP; if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) return -EOPNOTSUPP; /* these settings will always override what we currently have */ adapter->wol = 0; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= ATLX_WUFC_MAG; if (wol->wolopts & WAKE_PHY) adapter->wol |= ATLX_WUFC_LNKC; return 0; } static int atl2_nway_reset(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) atl2_reinit_locked(adapter); return 0; } static const struct ethtool_ops atl2_ethtool_ops = { .get_drvinfo = atl2_get_drvinfo, .get_regs_len = atl2_get_regs_len, .get_regs = atl2_get_regs, .get_wol = atl2_get_wol, .set_wol = atl2_set_wol, .get_msglevel = atl2_get_msglevel, .set_msglevel = atl2_set_msglevel, .nway_reset = atl2_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = atl2_get_eeprom_len, .get_eeprom = atl2_get_eeprom, .set_eeprom = atl2_set_eeprom, .get_link_ksettings = atl2_get_link_ksettings, .set_link_ksettings = atl2_set_link_ksettings, }; #define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \ (((a) & 0xff00ff00) >> 8)) #define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16)) #define SHORTSWAP(a) (((a) << 8) | ((a) >> 8)) /* * Reset the transmit and receive units; mask and clear all interrupts. * * hw - Struct containing variables accessed by shared code * return : 0 or idle status (if error) */ static s32 atl2_reset_hw(struct atl2_hw *hw) { u32 icr; u16 pci_cfg_cmd_word; int i; /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); if ((pci_cfg_cmd_word & (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) != (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) { pci_cfg_cmd_word |= (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER); atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); } /* Clear Interrupt mask to stop board from generating * interrupts & Clear any pending interrupt events */ /* FIXME */ /* ATL2_WRITE_REG(hw, REG_IMR, 0); */ /* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */ /* Issue Soft Reset to the MAC. This will reset the chip's * transmit, receive, DMA. It will not effect * the current PCI configuration. The global reset bit is self- * clearing, and should clear within a microsecond. 
*/ ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); wmb(); msleep(1); /* delay about 1ms */ /* Wait at least 10ms for All module to be Idle */ for (i = 0; i < 10; i++) { icr = ATL2_READ_REG(hw, REG_IDLE_STATUS); if (!icr) break; msleep(1); /* delay 1 ms */ cpu_relax(); } if (icr) return icr; return 0; } #define CUSTOM_SPI_CS_SETUP 2 #define CUSTOM_SPI_CLK_HI 2 #define CUSTOM_SPI_CLK_LO 2 #define CUSTOM_SPI_CS_HOLD 2 #define CUSTOM_SPI_CS_HI 3 static struct atl2_spi_flash_dev flash_table[] = { /* MFR WRSR READ PROGRAM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */ {"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 }, {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 }, {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 }, }; static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf) { int i; u32 value; ATL2_WRITE_REG(hw, REG_SPI_DATA, 0); ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr); value = SPI_FLASH_CTRL_WAIT_READY | (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) << SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) << SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) << SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) << SPI_FLASH_CTRL_CS_HI_SHIFT | (0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT; ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); value |= SPI_FLASH_CTRL_START; ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); for (i = 0; i < 10; i++) { msleep(1); value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); if (!(value & SPI_FLASH_CTRL_START)) break; } if (value & SPI_FLASH_CTRL_START) return false; *buf = ATL2_READ_REG(hw, REG_SPI_DATA); return true; } /* * get_permanent_address * return 0 if get valid mac address, */ static int get_permanent_address(struct atl2_hw *hw) { u32 Addr[2]; u32 i, Control; u16 Register; u8 EthAddr[ETH_ALEN]; bool KeyValid; if (is_valid_ether_addr(hw->perm_mac_addr)) return 0; Addr[0] = 0; Addr[1] = 0; if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */ Register = 0; KeyValid = false; /* Read out all EEPROM content */ i = 0; while (1) { if (atl2_read_eeprom(hw, i + 0x100, &Control)) { if (KeyValid) { if (Register == REG_MAC_STA_ADDR) Addr[0] = Control; else if (Register == (REG_MAC_STA_ADDR + 4)) Addr[1] = Control; KeyValid = false; } else if ((Control & 0xff) == 0x5A) { KeyValid = true; Register = (u16) (Control >> 16); } else { /* assume data end while encount an invalid KEYWORD */ break; } } else { break; /* read error */ } i += 4; } *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); if (is_valid_ether_addr(EthAddr)) { memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN); return 0; } return 1; } /* see if SPI flash exists? 
*/ Addr[0] = 0; Addr[1] = 0; Register = 0; KeyValid = false; i = 0; while (1) { if (atl2_spi_read(hw, i + 0x1f000, &Control)) { if (KeyValid) { if (Register == REG_MAC_STA_ADDR) Addr[0] = Control; else if (Register == (REG_MAC_STA_ADDR + 4)) Addr[1] = Control; KeyValid = false; } else if ((Control & 0xff) == 0x5A) { KeyValid = true; Register = (u16) (Control >> 16); } else { break; /* data end */ } } else { break; /* read error */ } i += 4; } *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]); if (is_valid_ether_addr(EthAddr)) { memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN); return 0; } /* maybe MAC-address is from BIOS */ Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4); *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); if (is_valid_ether_addr(EthAddr)) { memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN); return 0; } return 1; } /* * Reads the adapter's MAC address from the EEPROM * * hw - Struct containing variables accessed by shared code */ static s32 atl2_read_mac_addr(struct atl2_hw *hw) { if (get_permanent_address(hw)) { /* for test */ /* FIXME: shouldn't we use eth_random_addr() here? */ hw->perm_mac_addr[0] = 0x00; hw->perm_mac_addr[1] = 0x13; hw->perm_mac_addr[2] = 0x74; hw->perm_mac_addr[3] = 0x00; hw->perm_mac_addr[4] = 0x5c; hw->perm_mac_addr[5] = 0x38; } memcpy(hw->mac_addr, hw->perm_mac_addr, ETH_ALEN); return 0; } /* * Hashes an address to determine its location in the multicast table * * hw - Struct containing variables accessed by shared code * mc_addr - the multicast address to hash * * atl2_hash_mc_addr * purpose * set hash value for a multicast address * hash calcu processing : * 1. calcu 32bit CRC for multicast address * 2. reverse crc with MSB to LSB */ static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr) { u32 crc32, value; int i; value = 0; crc32 = ether_crc_le(6, mc_addr); for (i = 0; i < 32; i++) value |= (((crc32 >> i) & 1) << (31 - i)); return value; } /* * Sets the bit in the multicast table corresponding to the hash value. * * hw - Struct containing variables accessed by shared code * hash_value - Multicast address hash value */ static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value) { u32 hash_bit, hash_reg; u32 mta; /* The HASH Table is a register array of 2 32-bit registers. * It is treated like an array of 64 bits. We want to set * bit BitArray[hash_value]. So we figure out what register * the bit is in, read it, OR in the new bit, then write * back the new value. The register is determined by the * upper 7 bits of the hash value and the bit within that * register are determined by the lower 5 bits of the value. 
*/ hash_reg = (hash_value >> 31) & 0x1; hash_bit = (hash_value >> 26) & 0x1F; mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); mta |= (1 << hash_bit); ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); } /* * atl2_init_pcie - init PCIE module */ static void atl2_init_pcie(struct atl2_hw *hw) { u32 value; value = LTSSM_TEST_MODE_DEF; ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); value = PCIE_DLL_TX_CTRL1_DEF; ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value); } static void atl2_init_flash_opcode(struct atl2_hw *hw) { if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) hw->flash_vendor = 0; /* ATMEL */ /* Init OP table */ ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM, flash_table[hw->flash_vendor].cmdPROGRAM); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE, flash_table[hw->flash_vendor].cmdSECTOR_ERASE); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE, flash_table[hw->flash_vendor].cmdCHIP_ERASE); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID, flash_table[hw->flash_vendor].cmdRDID); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN, flash_table[hw->flash_vendor].cmdWREN); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR, flash_table[hw->flash_vendor].cmdRDSR); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR, flash_table[hw->flash_vendor].cmdWRSR); ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ, flash_table[hw->flash_vendor].cmdREAD); } /******************************************************************** * Performs basic configuration of the adapter. * * hw - Struct containing variables accessed by shared code * Assumes that the controller has previously been reset and is in a * post-reset uninitialized state. Initializes multicast table, * and Calls routines to setup link * Leaves the transmit and receive units disabled and uninitialized. ********************************************************************/ static s32 atl2_init_hw(struct atl2_hw *hw) { u32 ret_val = 0; atl2_init_pcie(hw); /* Zero out the Multicast HASH table */ /* clear the old settings from the multicast hash table */ ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); atl2_init_flash_opcode(hw); ret_val = atl2_phy_init(hw); return ret_val; } /* * Detects the current speed and duplex settings of the hardware. 
* * hw - Struct containing variables accessed by shared code * speed - Speed of the connection * duplex - Duplex setting of the connection */ static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed, u16 *duplex) { s32 ret_val; u16 phy_data; /* Read PHY Specific Status Register (17) */ ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); if (ret_val) return ret_val; if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) return ATLX_ERR_PHY_RES; switch (phy_data & MII_ATLX_PSSR_SPEED) { case MII_ATLX_PSSR_100MBS: *speed = SPEED_100; break; case MII_ATLX_PSSR_10MBS: *speed = SPEED_10; break; default: return ATLX_ERR_PHY_SPEED; } if (phy_data & MII_ATLX_PSSR_DPLX) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; return 0; } /* * Reads the value from a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to read */ static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data) { u32 val; int i; val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); wmb(); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = ATL2_READ_REG(hw, REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; wmb(); } if (!(val & (MDIO_START | MDIO_BUSY))) { *phy_data = (u16)val; return 0; } return ATLX_ERR_PHY; } /* * Writes a value to a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to write * data - data to write to the PHY */ static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data) { int i; u32 val; val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | MDIO_SUP_PREAMBLE | MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); wmb(); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = ATL2_READ_REG(hw, REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; wmb(); } if (!(val & (MDIO_START | MDIO_BUSY))) return 0; return ATLX_ERR_PHY; } /* * Configures PHY autoneg and flow control advertisement settings * * hw - Struct containing variables accessed by shared code */ static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw) { s16 mii_autoneg_adv_reg; /* Read the MII Auto-Neg Advertisement Register (Address 4). */ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; /* Need to parse autoneg_advertised and set up * the appropriate PHY registers. First we will parse for * autoneg_advertised software override. Since we can advertise * a plethora of combinations, we need to check each bit * individually. */ /* First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). */ mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; /* Need to parse MediaType and setup the * appropriate PHY registers. 
*/ switch (hw->MediaType) { case MEDIA_TYPE_AUTO_SENSOR: mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | MII_AR_10T_FD_CAPS | MII_AR_100TX_HD_CAPS| MII_AR_100TX_FD_CAPS); hw->autoneg_advertised = ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF| ADVERTISE_100_FULL; break; case MEDIA_TYPE_100M_FULL: mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; hw->autoneg_advertised = ADVERTISE_100_FULL; break; case MEDIA_TYPE_100M_HALF: mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; hw->autoneg_advertised = ADVERTISE_100_HALF; break; case MEDIA_TYPE_10M_FULL: mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; hw->autoneg_advertised = ADVERTISE_10_FULL; break; default: mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; hw->autoneg_advertised = ADVERTISE_10_HALF; break; } /* flow control fixed to enable all */ mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; return atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); } /* * Resets the PHY and make all config validate * * hw - Struct containing variables accessed by shared code * * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) */ static s32 atl2_phy_commit(struct atl2_hw *hw) { s32 ret_val; u16 phy_data; phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data); if (ret_val) { u32 val; int i; /* pcie serdes link may be down ! */ for (i = 0; i < 25; i++) { msleep(1); val = ATL2_READ_REG(hw, REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if (0 != (val & (MDIO_START | MDIO_BUSY))) { printk(KERN_ERR "atl2: PCIe link down for at least 25ms !\n"); return ret_val; } } return 0; } static s32 atl2_phy_init(struct atl2_hw *hw) { s32 ret_val; u16 phy_val; if (hw->phy_configured) return 0; /* Enable PHY */ ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1); ATL2_WRITE_FLUSH(hw); msleep(1); /* check if the PHY is in powersaving mode */ atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); /* 024E / 124E 0r 0274 / 1274 ? */ if (phy_val & 0x1000) { phy_val &= ~0x1000; atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val); } msleep(1); /*Enable PHY LinkChange Interrupt */ ret_val = atl2_write_phy_reg(hw, 18, 0xC00); if (ret_val) return ret_val; /* setup AutoNeg parameters */ ret_val = atl2_phy_setup_autoneg_adv(hw); if (ret_val) return ret_val; /* SW.Reset & En-Auto-Neg to restart Auto-Neg */ ret_val = atl2_phy_commit(hw); if (ret_val) return ret_val; hw->phy_configured = true; return ret_val; } static void atl2_set_mac_addr(struct atl2_hw *hw) { u32 value; /* 00-0B-6A-F6-00-DC * 0: 6AF600DC 1: 000B * low dword */ value = (((u32)hw->mac_addr[2]) << 24) | (((u32)hw->mac_addr[3]) << 16) | (((u32)hw->mac_addr[4]) << 8) | (((u32)hw->mac_addr[5])); ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); /* hight dword */ value = (((u32)hw->mac_addr[0]) << 8) | (((u32)hw->mac_addr[1])); ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); } /* * check_eeprom_exist * return 0 if eeprom exist */ static int atl2_check_eeprom_exist(struct atl2_hw *hw) { u32 value; value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); if (value & SPI_FLASH_CTRL_EN_VPD) { value &= ~SPI_FLASH_CTRL_EN_VPD; ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); } value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST); return ((value & 0xFF00) == 0x6C00) ? 0 : 1; } /* FIXME: This doesn't look right. 
-- CHS */ static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value) { return true; } static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue) { int i; u32 Control; if (Offset & 0x3) return false; /* address do not align */ ATL2_WRITE_REG(hw, REG_VPD_DATA, 0); Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; ATL2_WRITE_REG(hw, REG_VPD_CAP, Control); for (i = 0; i < 10; i++) { msleep(2); Control = ATL2_READ_REG(hw, REG_VPD_CAP); if (Control & VPD_CAP_VPD_FLAG) break; } if (Control & VPD_CAP_VPD_FLAG) { *pValue = ATL2_READ_REG(hw, REG_VPD_DATA); return true; } return false; /* timeout */ } static void atl2_force_ps(struct atl2_hw *hw) { u16 phy_val; atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000); atl2_write_phy_reg(hw, MII_DBG_ADDR, 2); atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000); atl2_write_phy_reg(hw, MII_DBG_ADDR, 3); atl2_write_phy_reg(hw, MII_DBG_DATA, 0); } /* This is the only thing that needs to be changed to adjust the * maximum number of ports that the driver can manage. */ #define ATL2_MAX_NIC 4 #define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 /* All parameters are treated the same, as an integer array of values. * This macro just reduces the need to repeat the same declaration code * over and over (plus this helps to avoid typo bugs). */ #define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET} #ifndef module_param_array /* Module Parameters are always initialized to -1, so that the driver * can tell the difference between no user specified value or the * user asking for the default value. * The true default values are loaded in when atl2_check_options is called. * * This is a GCC extension to ANSI C. * See the item "Labeled Elements in Initializers" in the section * "Extensions to the C Language Family" of the GCC documentation. 
*/ #define ATL2_PARAM(X, desc) \ static const int X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ MODULE_PARM_DESC(X, desc); #else #define ATL2_PARAM(X, desc) \ static int X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \ static unsigned int num_##X; \ module_param_array_named(X, X, int, &num_##X, 0); \ MODULE_PARM_DESC(X, desc); #endif /* * Transmit Memory Size * Valid Range: 64-2048 * Default Value: 128 */ #define ATL2_MIN_TX_MEMSIZE 4 /* 4KB */ #define ATL2_MAX_TX_MEMSIZE 64 /* 64KB */ #define ATL2_DEFAULT_TX_MEMSIZE 8 /* 8KB */ ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory"); /* * Receive Memory Block Count * Valid Range: 16-512 * Default Value: 128 */ #define ATL2_MIN_RXD_COUNT 16 #define ATL2_MAX_RXD_COUNT 512 #define ATL2_DEFAULT_RXD_COUNT 64 ATL2_PARAM(RxMemBlock, "Number of receive memory block"); /* * User Specified MediaType Override * * Valid Range: 0-5 * - 0 - auto-negotiate at all supported speeds * - 1 - only link at 1000Mbps Full Duplex * - 2 - only link at 100Mbps Full Duplex * - 3 - only link at 100Mbps Half Duplex * - 4 - only link at 10Mbps Full Duplex * - 5 - only link at 10Mbps Half Duplex * Default Value: 0 */ ATL2_PARAM(MediaType, "MediaType Select"); /* * Interrupt Moderate Timer in units of 2048 ns (~2 us) * Valid Range: 10-65535 * Default Value: 45000(90ms) */ #define INT_MOD_DEFAULT_CNT 100 /* 200us */ #define INT_MOD_MAX_CNT 65000 #define INT_MOD_MIN_CNT 50 ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer"); /* * FlashVendor * Valid Range: 0-2 * 0 - Atmel * 1 - SST * 2 - ST */ ATL2_PARAM(FlashVendor, "SPI Flash Vendor"); #define AUTONEG_ADV_DEFAULT 0x2F #define AUTONEG_ADV_MASK 0x2F #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL #define FLASH_VENDOR_DEFAULT 0 #define FLASH_VENDOR_MIN 0 #define FLASH_VENDOR_MAX 2 struct atl2_option { enum { enable_option, range_option, list_option } type; char *name; char *err; int def; union { struct { /* range_option info */ int min; int max; } r; struct { /* list_option info */ int nr; struct atl2_opt_list { int i; char *str; } *p; } l; } arg; }; static int atl2_validate_option(int *value, struct atl2_option *opt) { int i; struct atl2_opt_list *ent; if (*value == OPTION_UNSET) { *value = opt->def; return 0; } switch (opt->type) { case enable_option: switch (*value) { case OPTION_ENABLED: printk(KERN_INFO "%s Enabled\n", opt->name); return 0; case OPTION_DISABLED: printk(KERN_INFO "%s Disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { printk(KERN_INFO "%s set to %i\n", opt->name, *value); return 0; } break; case list_option: for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') printk(KERN_INFO "%s\n", ent->str); return 0; } } break; default: BUG(); } printk(KERN_INFO "Invalid %s specified (%i) %s\n", opt->name, *value, opt->err); *value = opt->def; return -1; } /** * atl2_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * * This routine checks all command line parameters for valid user * input. If an invalid value is given, or if no user specified * value exists, a default value is used. The final value is stored * in a variable in the adapter structure. 
*/ static void atl2_check_options(struct atl2_adapter *adapter) { int val; struct atl2_option opt; int bd = adapter->bd_number; if (bd >= ATL2_MAX_NIC) { printk(KERN_NOTICE "Warning: no configuration for board #%i\n", bd); printk(KERN_NOTICE "Using defaults for all values\n"); #ifndef module_param_array bd = ATL2_MAX_NIC; #endif } /* Bytes of Transmit Memory */ opt.type = range_option; opt.name = "Bytes of Transmit Memory"; opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE); opt.def = ATL2_DEFAULT_TX_MEMSIZE; opt.arg.r.min = ATL2_MIN_TX_MEMSIZE; opt.arg.r.max = ATL2_MAX_TX_MEMSIZE; #ifdef module_param_array if (num_TxMemSize > bd) { #endif val = TxMemSize[bd]; atl2_validate_option(&val, &opt); adapter->txd_ring_size = ((u32) val) * 1024; #ifdef module_param_array } else adapter->txd_ring_size = ((u32)opt.def) * 1024; #endif /* txs ring size: */ adapter->txs_ring_size = adapter->txd_ring_size / 128; if (adapter->txs_ring_size > 160) adapter->txs_ring_size = 160; /* Receive Memory Block Count */ opt.type = range_option; opt.name = "Number of receive memory block"; opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT); opt.def = ATL2_DEFAULT_RXD_COUNT; opt.arg.r.min = ATL2_MIN_RXD_COUNT; opt.arg.r.max = ATL2_MAX_RXD_COUNT; #ifdef module_param_array if (num_RxMemBlock > bd) { #endif val = RxMemBlock[bd]; atl2_validate_option(&val, &opt); adapter->rxd_ring_size = (u32)val; /* FIXME */ /* ((u16)val)&~1; */ /* even number */ #ifdef module_param_array } else adapter->rxd_ring_size = (u32)opt.def; #endif /* init RXD Flow control value */ adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7; adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) > (adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) : (adapter->rxd_ring_size / 12); /* Interrupt Moderate Timer */ opt.type = range_option; opt.name = "Interrupt Moderate Timer"; opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT); opt.def = INT_MOD_DEFAULT_CNT; opt.arg.r.min = INT_MOD_MIN_CNT; opt.arg.r.max = INT_MOD_MAX_CNT; #ifdef module_param_array if (num_IntModTimer > bd) { #endif val = IntModTimer[bd]; atl2_validate_option(&val, &opt); adapter->imt = (u16) val; #ifdef module_param_array } else adapter->imt = (u16)(opt.def); #endif /* Flash Vendor */ opt.type = range_option; opt.name = "SPI Flash Vendor"; opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT); opt.def = FLASH_VENDOR_DEFAULT; opt.arg.r.min = FLASH_VENDOR_MIN; opt.arg.r.max = FLASH_VENDOR_MAX; #ifdef module_param_array if (num_FlashVendor > bd) { #endif val = FlashVendor[bd]; atl2_validate_option(&val, &opt); adapter->hw.flash_vendor = (u8) val; #ifdef module_param_array } else adapter->hw.flash_vendor = (u8)(opt.def); #endif /* MediaType */ opt.type = range_option; opt.name = "Speed/Duplex Selection"; opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR); opt.def = MEDIA_TYPE_AUTO_SENSOR; opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR; opt.arg.r.max = MEDIA_TYPE_10M_HALF; #ifdef module_param_array if (num_MediaType > bd) { #endif val = MediaType[bd]; atl2_validate_option(&val, &opt); adapter->hw.MediaType = (u16) val; #ifdef module_param_array } else adapter->hw.MediaType = (u16)(opt.def); #endif }
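/*
 * Editor's note -- illustrative sketch only, not part of the driver above.
 * The multicast filter path (atl2_hash_mc_addr / atl2_hash_set) computes a
 * little-endian CRC32 of the multicast MAC, bit-reverses it MSB<->LSB, then
 * uses bit 31 of the result to pick one of the two 32-bit hash-table
 * registers and bits 30:26 to pick the bit within that register.  The
 * standalone user-space C below reproduces only that bit arithmetic; the
 * CRC value fed in is an arbitrary example, not a real ether_crc_le()
 * result, and nothing here touches hardware.
 */
#if 0	/* illustrative only -- never compiled into the driver */
#include <stdint.h>
#include <stdio.h>

/* Reverse a 32-bit value MSB<->LSB, as atl2_hash_mc_addr does after CRC. */
static uint32_t reflect32(uint32_t crc)
{
	uint32_t value = 0;
	int i;

	for (i = 0; i < 32; i++)
		value |= ((crc >> i) & 1u) << (31 - i);
	return value;
}

int main(void)
{
	uint32_t hash = reflect32(0x12345678u);		/* example CRC, not a real one */
	uint32_t hash_reg = (hash >> 31) & 0x1;		/* which of the 2 hash registers */
	uint32_t hash_bit = (hash >> 26) & 0x1F;	/* which bit inside that register */

	printf("hash=0x%08x -> register %u, bit %u\n", hash, hash_reg, hash_bit);
	return 0;
}
#endif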
linux-master
drivers/net/ethernet/atheros/atlx/atl2.c
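/*
 * Editor's note -- illustrative sketch only, not part of either driver.
 * get_permanent_address() in the atl2 code above reassembles the station MAC
 * from two 32-bit EEPROM/register words with the LONGSWAP/SHORTSWAP macros
 * (the atl1 code that follows does the same job with swab32()/swab16()).
 * The standalone user-space C below walks through that reassembly using the
 * example values from the atl2_set_mac_addr() comment (MAC 00-0B-6A-F6-00-DC
 * stored as 0x6AF600DC / 0x0000000B); like the driver's pointer casts, it
 * assumes a little-endian host.
 */
#if 0	/* illustrative only */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LBYTESWAP(a) ((((a) & 0x00ff00ffu) << 8) | (((a) & 0xff00ff00u) >> 8))
#define LONGSWAP(a)  ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
#define SHORTSWAP(a) ((uint16_t)(((a) << 8) | ((a) >> 8)))

int main(void)
{
	/* Example register contents, taken from the driver comment. */
	uint32_t addr0 = 0x6AF600DCu;	/* REG_MAC_STA_ADDR     */
	uint32_t addr1 = 0x0000000Bu;	/* REG_MAC_STA_ADDR + 4 */
	uint32_t low;
	uint16_t high;
	uint8_t mac[6];

	low = LONGSWAP(addr0);			/* full byte reverse -> MAC bytes 2..5 */
	high = SHORTSWAP((uint16_t)addr1);	/* 16-bit byte swap  -> MAC bytes 0..1 */
	memcpy(&mac[2], &low, 4);		/* little-endian store, as in the driver */
	memcpy(&mac[0], &high, 2);

	printf("%02X-%02X-%02X-%02X-%02X-%02X\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
#endif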
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <[email protected]> * Copyright(c) 2006 - 2008 Jay Cliburn <[email protected]> * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * Contact Information: * Xiong Huang <[email protected]> * Jie Yang <[email protected]> * Chris Snook <[email protected]> * Jay Cliburn <[email protected]> * * This version is adapted from the Attansic reference driver. * * TODO: * Add more ethtool functions. * Fix abstruse irq enable/disable condition described here: * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 * * NEEDS TESTING: * VLAN * multicast * promiscuous mode * interrupt coalescing * SMP torture testing */ #include <linux/atomic.h> #include <asm/byteorder.h> #include <linux/compiler.h> #include <linux/crc32.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/hardirq.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/irqflags.h> #include <linux/irqreturn.h> #include <linux/jiffies.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/pm.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/tcp.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> #include <net/checksum.h> #include "atl1.h" MODULE_AUTHOR("Xiong Huang <[email protected]>, " "Chris Snook <[email protected]>, " "Jay Cliburn <[email protected]>"); MODULE_LICENSE("GPL"); /* Temporary hack for merging atl1 and atl2 */ #include "atlx.c" static const struct ethtool_ops atl1_ethtool_ops; /* * This is the only thing that needs to be changed to adjust the * maximum number of ports that the driver can manage. */ #define ATL1_MAX_NIC 4 #define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 #define ATL1_PARAM_INIT { [0 ... 
ATL1_MAX_NIC] = OPTION_UNSET } /* * Interrupt Moderate Timer in units of 2 us * * Valid Range: 10-65535 * * Default Value: 100 (200us) */ static int int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; static unsigned int num_int_mod_timer; module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0); MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer"); #define DEFAULT_INT_MOD_CNT 100 /* 200us */ #define MAX_INT_MOD_CNT 65000 #define MIN_INT_MOD_CNT 50 struct atl1_option { enum { enable_option, range_option, list_option } type; char *name; char *err; int def; union { struct { /* range_option info */ int min; int max; } r; struct { /* list_option info */ int nr; struct atl1_opt_list { int i; char *str; } *p; } l; } arg; }; static int atl1_validate_option(int *value, struct atl1_option *opt, struct pci_dev *pdev) { if (*value == OPTION_UNSET) { *value = opt->def; return 0; } switch (opt->type) { case enable_option: switch (*value) { case OPTION_ENABLED: dev_info(&pdev->dev, "%s enabled\n", opt->name); return 0; case OPTION_DISABLED: dev_info(&pdev->dev, "%s disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value); return 0; } break; case list_option:{ int i; struct atl1_opt_list *ent; for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') dev_info(&pdev->dev, "%s\n", ent->str); return 0; } } } break; default: break; } dev_info(&pdev->dev, "invalid %s specified (%i) %s\n", opt->name, *value, opt->err); *value = opt->def; return -1; } /** * atl1_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * * This routine checks all command line parameters for valid user * input. If an invalid value is given, or if no user specified * value exists, a default value is used. The final value is stored * in a variable in the adapter structure. */ static void atl1_check_options(struct atl1_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int bd = adapter->bd_number; if (bd >= ATL1_MAX_NIC) { dev_notice(&pdev->dev, "no configuration for board#%i\n", bd); dev_notice(&pdev->dev, "using defaults for all values\n"); } { /* Interrupt Moderate Timer */ struct atl1_option opt = { .type = range_option, .name = "Interrupt Moderator Timer", .err = "using default of " __MODULE_STRING(DEFAULT_INT_MOD_CNT), .def = DEFAULT_INT_MOD_CNT, .arg = {.r = {.min = MIN_INT_MOD_CNT, .max = MAX_INT_MOD_CNT} } }; int val; if (num_int_mod_timer > bd) { val = int_mod_timer[bd]; atl1_validate_option(&val, &opt, pdev); adapter->imt = (u16) val; } else adapter->imt = (u16) (opt.def); } } /* * atl1_pci_tbl - PCI Device ID Table */ static const struct pci_device_id atl1_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, /* required last entry */ {0,} }; MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)"); /* * Reset the transmit and receive units; mask and clear all interrupts. 
* hw - Struct containing variables accessed by shared code * return : 0 or idle status (if error) */ static s32 atl1_reset_hw(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; u32 icr; int i; /* * Clear Interrupt mask to stop board from generating * interrupts & Clear any pending interrupt events */ /* * atlx_irq_disable(adapter); * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); */ /* * Issue Soft Reset to the MAC. This will reset the chip's * transmit, receive, DMA. It will not effect * the current PCI configuration. The global reset bit is self- * clearing, and should clear within a microsecond. */ iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); ioread32(hw->hw_addr + REG_MASTER_CTRL); iowrite16(1, hw->hw_addr + REG_PHY_ENABLE); ioread16(hw->hw_addr + REG_PHY_ENABLE); /* delay about 1ms */ msleep(1); /* Wait at least 10ms for All module to be Idle */ for (i = 0; i < 10; i++) { icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); if (!icr) break; /* delay 1 ms */ msleep(1); /* FIXME: still the right way to do this? */ cpu_relax(); } if (icr) { if (netif_msg_hw(adapter)) dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); return icr; } return 0; } /* function about EEPROM * * check_eeprom_exist * return 0 if eeprom exist */ static int atl1_check_eeprom_exist(struct atl1_hw *hw) { u32 value; value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); if (value & SPI_FLASH_CTRL_EN_VPD) { value &= ~SPI_FLASH_CTRL_EN_VPD; iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); } value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); return ((value & 0xFF00) == 0x6C00) ? 0 : 1; } static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) { int i; u32 control; if (offset & 3) /* address do not align */ return false; iowrite32(0, hw->hw_addr + REG_VPD_DATA); control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; iowrite32(control, hw->hw_addr + REG_VPD_CAP); ioread32(hw->hw_addr + REG_VPD_CAP); for (i = 0; i < 10; i++) { msleep(2); control = ioread32(hw->hw_addr + REG_VPD_CAP); if (control & VPD_CAP_VPD_FLAG) break; } if (control & VPD_CAP_VPD_FLAG) { *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); return true; } /* timeout */ return false; } /* * Reads the value from a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to read */ static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) { u32 val; int i; val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); ioread32(hw->hw_addr + REG_MDIO_CTRL); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = ioread32(hw->hw_addr + REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if (!(val & (MDIO_START | MDIO_BUSY))) { *phy_data = (u16) val; return 0; } return ATLX_ERR_PHY; } #define CUSTOM_SPI_CS_SETUP 2 #define CUSTOM_SPI_CLK_HI 2 #define CUSTOM_SPI_CLK_LO 2 #define CUSTOM_SPI_CS_HOLD 2 #define CUSTOM_SPI_CS_HI 3 static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf) { int i; u32 value; iowrite32(0, hw->hw_addr + REG_SPI_DATA); iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); value = SPI_FLASH_CTRL_WAIT_READY | (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) << SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) << 
SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) << SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) << SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT; iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); value |= SPI_FLASH_CTRL_START; iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); for (i = 0; i < 10; i++) { msleep(1); value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); if (!(value & SPI_FLASH_CTRL_START)) break; } if (value & SPI_FLASH_CTRL_START) return false; *buf = ioread32(hw->hw_addr + REG_SPI_DATA); return true; } /* * get_permanent_address * return 0 if get valid mac address, */ static int atl1_get_permanent_address(struct atl1_hw *hw) { u32 addr[2]; u32 i, control; u16 reg; u8 eth_addr[ETH_ALEN]; bool key_valid; if (is_valid_ether_addr(hw->perm_mac_addr)) return 0; /* init */ addr[0] = addr[1] = 0; if (!atl1_check_eeprom_exist(hw)) { reg = 0; key_valid = false; /* Read out all EEPROM content */ i = 0; while (1) { if (atl1_read_eeprom(hw, i + 0x100, &control)) { if (key_valid) { if (reg == REG_MAC_STA_ADDR) addr[0] = control; else if (reg == (REG_MAC_STA_ADDR + 4)) addr[1] = control; key_valid = false; } else if ((control & 0xff) == 0x5A) { key_valid = true; reg = (u16) (control >> 16); } else break; } else /* read error */ break; i += 4; } *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } } /* see if SPI FLAGS exist ? */ addr[0] = addr[1] = 0; reg = 0; key_valid = false; i = 0; while (1) { if (atl1_spi_read(hw, i + 0x1f000, &control)) { if (key_valid) { if (reg == REG_MAC_STA_ADDR) addr[0] = control; else if (reg == (REG_MAC_STA_ADDR + 4)) addr[1] = control; key_valid = false; } else if ((control & 0xff) == 0x5A) { key_valid = true; reg = (u16) (control >> 16); } else /* data end */ break; } else /* read error */ break; i += 4; } *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } /* * On some motherboards, the MAC address is written by the * BIOS directly to the MAC register during POST, and is * not stored in eeprom. If all else thus far has failed * to fetch the permanent MAC address, try reading it directly. */ addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } return 1; } /* * Reads the adapter's MAC address from the EEPROM * hw - Struct containing variables accessed by shared code */ static s32 atl1_read_mac_addr(struct atl1_hw *hw) { s32 ret = 0; u16 i; if (atl1_get_permanent_address(hw)) { eth_random_addr(hw->perm_mac_addr); ret = 1; } for (i = 0; i < ETH_ALEN; i++) hw->mac_addr[i] = hw->perm_mac_addr[i]; return ret; } /* * Hashes an address to determine its location in the multicast table * hw - Struct containing variables accessed by shared code * mc_addr - the multicast address to hash * * atl1_hash_mc_addr * purpose * set hash value for a multicast address * hash calcu processing : * 1. calcu 32bit CRC for multicast address * 2. 
reverse crc with MSB to LSB */ static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) { u32 crc32, value = 0; int i; crc32 = ether_crc_le(6, mc_addr); for (i = 0; i < 32; i++) value |= (((crc32 >> i) & 1) << (31 - i)); return value; } /* * Sets the bit in the multicast table corresponding to the hash value. * hw - Struct containing variables accessed by shared code * hash_value - Multicast address hash value */ static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) { u32 hash_bit, hash_reg; u32 mta; /* * The HASH Table is a register array of 2 32-bit registers. * It is treated like an array of 64 bits. We want to set * bit BitArray[hash_value]. So we figure out what register * the bit is in, read it, OR in the new bit, then write * back the new value. The register is determined by the * upper 7 bits of the hash value and the bit within that * register are determined by the lower 5 bits of the value. */ hash_reg = (hash_value >> 31) & 0x1; hash_bit = (hash_value >> 26) & 0x1F; mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); mta |= (1 << hash_bit); iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); } /* * Writes a value to a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to write * data - data to write to the PHY */ static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) { int i; u32 val; val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | MDIO_SUP_PREAMBLE | MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); ioread32(hw->hw_addr + REG_MDIO_CTRL); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = ioread32(hw->hw_addr + REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if (!(val & (MDIO_START | MDIO_BUSY))) return 0; return ATLX_ERR_PHY; } /* * Make L001's PHY out of Power Saving State (bug) * hw - Struct containing variables accessed by shared code * when power on, L001's PHY always on Power saving State * (Gigabit Link forbidden) */ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) { s32 ret; ret = atl1_write_phy_reg(hw, 29, 0x0029); if (ret) return ret; return atl1_write_phy_reg(hw, 30, 0); } /* * Resets the PHY and make all config validate * hw - Struct containing variables accessed by shared code * * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) */ static s32 atl1_phy_reset(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; s32 ret_val; u16 phy_data; if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; else { switch (hw->media_type) { case MEDIA_TYPE_100M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_100M_HALF: phy_data = MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_10M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; break; default: /* MEDIA_TYPE_10M_HALF: */ phy_data = MII_CR_SPEED_10 | MII_CR_RESET; break; } } ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); if (ret_val) { u32 val; int i; /* pcie serdes link may be down! 
*/ if (netif_msg_hw(adapter)) dev_dbg(&pdev->dev, "pcie phy link down\n"); for (i = 0; i < 25; i++) { msleep(1); val = ioread32(hw->hw_addr + REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if ((val & (MDIO_START | MDIO_BUSY)) != 0) { if (netif_msg_hw(adapter)) dev_warn(&pdev->dev, "pcie link down at least 25ms\n"); return ret_val; } } return 0; } /* * Configures PHY autoneg and flow control advertisement settings * hw - Struct containing variables accessed by shared code */ static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) { s32 ret_val; s16 mii_autoneg_adv_reg; s16 mii_1000t_ctrl_reg; /* Read the MII Auto-Neg Advertisement Register (Address 4). */ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; /* Read the MII 1000Base-T Control Register (Address 9). */ mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK; /* * First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). */ mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK; /* * Need to parse media_type and set up * the appropriate PHY registers. */ switch (hw->media_type) { case MEDIA_TYPE_AUTO_SENSOR: mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | MII_AR_10T_FD_CAPS | MII_AR_100TX_HD_CAPS | MII_AR_100TX_FD_CAPS); mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; break; case MEDIA_TYPE_1000M_FULL: mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; break; case MEDIA_TYPE_100M_FULL: mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; break; case MEDIA_TYPE_100M_HALF: mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; break; case MEDIA_TYPE_10M_FULL: mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; break; default: mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; break; } /* flow control fixed to enable all */ mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); if (ret_val) return ret_val; ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg); if (ret_val) return ret_val; return 0; } /* * Configures link settings. * hw - Struct containing variables accessed by shared code * Assumes the hardware has previously been reset and the * transmitter and receiver are not enabled. */ static s32 atl1_setup_link(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; s32 ret_val; /* * Options: * PHY will advertise value(s) parsed from * autoneg_advertised and fc * no matter what autoneg is , We will not wait link result. 
*/ ret_val = atl1_phy_setup_autoneg_adv(hw); if (ret_val) { if (netif_msg_link(adapter)) dev_dbg(&pdev->dev, "error setting up autonegotiation\n"); return ret_val; } /* SW.Reset , En-Auto-Neg if needed */ ret_val = atl1_phy_reset(hw); if (ret_val) { if (netif_msg_link(adapter)) dev_dbg(&pdev->dev, "error resetting phy\n"); return ret_val; } hw->phy_configured = true; return ret_val; } static void atl1_init_flash_opcode(struct atl1_hw *hw) { if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) /* Atmel */ hw->flash_vendor = 0; /* Init OP table */ iowrite8(flash_table[hw->flash_vendor].cmd_program, hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); iowrite8(flash_table[hw->flash_vendor].cmd_rdid, hw->hw_addr + REG_SPI_FLASH_OP_RDID); iowrite8(flash_table[hw->flash_vendor].cmd_wren, hw->hw_addr + REG_SPI_FLASH_OP_WREN); iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, hw->hw_addr + REG_SPI_FLASH_OP_RDSR); iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, hw->hw_addr + REG_SPI_FLASH_OP_WRSR); iowrite8(flash_table[hw->flash_vendor].cmd_read, hw->hw_addr + REG_SPI_FLASH_OP_READ); } /* * Performs basic configuration of the adapter. * hw - Struct containing variables accessed by shared code * Assumes that the controller has previously been reset and is in a * post-reset uninitialized state. Initializes multicast table, * and Calls routines to setup link * Leaves the transmit and receive units disabled and uninitialized. */ static s32 atl1_init_hw(struct atl1_hw *hw) { u32 ret_val = 0; /* Zero out the Multicast HASH table */ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); /* clear the old settings from the multicast hash table */ iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); atl1_init_flash_opcode(hw); if (!hw->phy_configured) { /* enable GPHY LinkChange Interrupt */ ret_val = atl1_write_phy_reg(hw, 18, 0xC00); if (ret_val) return ret_val; /* make PHY out of power-saving state */ ret_val = atl1_phy_leave_power_saving(hw); if (ret_val) return ret_val; /* Call a subroutine to configure the link */ ret_val = atl1_setup_link(hw); } return ret_val; } /* * Detects the current speed and duplex settings of the hardware. 
* hw - Struct containing variables accessed by shared code * speed - Speed of the connection * duplex - Duplex setting of the connection */ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; s32 ret_val; u16 phy_data; /* ; --- Read PHY Specific Status Register (17) */ ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); if (ret_val) return ret_val; if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) return ATLX_ERR_PHY_RES; switch (phy_data & MII_ATLX_PSSR_SPEED) { case MII_ATLX_PSSR_1000MBS: *speed = SPEED_1000; break; case MII_ATLX_PSSR_100MBS: *speed = SPEED_100; break; case MII_ATLX_PSSR_10MBS: *speed = SPEED_10; break; default: if (netif_msg_hw(adapter)) dev_dbg(&pdev->dev, "error getting speed\n"); return ATLX_ERR_PHY_SPEED; } if (phy_data & MII_ATLX_PSSR_DPLX) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; return 0; } static void atl1_set_mac_addr(struct atl1_hw *hw) { u32 value; /* * 00-0B-6A-F6-00-DC * 0: 6AF600DC 1: 000B * low dword */ value = (((u32) hw->mac_addr[2]) << 24) | (((u32) hw->mac_addr[3]) << 16) | (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); /* high dword */ value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); } /** * atl1_sw_init - Initialize general software structures (struct atl1_adapter) * @adapter: board private structure to initialize * * atl1_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). */ static int atl1_sw_init(struct atl1_adapter *adapter) { struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; adapter->wol = 0; device_set_wakeup_enable(&adapter->pdev->dev, false); adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; adapter->ict = 50000; /* 100ms */ adapter->link_speed = SPEED_0; /* hardware init */ adapter->link_duplex = FULL_DUPLEX; hw->phy_configured = false; hw->preamble_len = 7; hw->ipgt = 0x60; hw->min_ifg = 0x50; hw->ipgr1 = 0x40; hw->ipgr2 = 0x60; hw->max_retry = 0xf; hw->lcol = 0x37; hw->jam_ipg = 7; hw->rfd_burst = 8; hw->rrd_burst = 8; hw->rfd_fetch_gap = 1; hw->rx_jumbo_th = adapter->rx_buffer_len / 8; hw->rx_jumbo_lkah = 1; hw->rrd_ret_timer = 16; hw->tpd_burst = 4; hw->tpd_fetch_th = 16; hw->txf_burst = 0x100; hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; hw->tpd_fetch_gap = 1; hw->rcb_value = atl1_rcb_64; hw->dma_ord = atl1_dma_ord_enh; hw->dmar_block = atl1_dma_req_256; hw->dmaw_block = atl1_dma_req_256; hw->cmb_rrd = 4; hw->cmb_tpd = 4; hw->cmb_rx_timer = 1; /* about 2us */ hw->cmb_tx_timer = 1; /* about 2us */ hw->smb_timer = 100000; /* about 200ms */ spin_lock_init(&adapter->lock); spin_lock_init(&adapter->mb_lock); return 0; } static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) { struct atl1_adapter *adapter = netdev_priv(netdev); u16 result; atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); return result; } static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) { struct atl1_adapter *adapter = netdev_priv(netdev); atl1_write_phy_reg(&adapter->hw, reg_num, val); } static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct 
atl1_adapter *adapter = netdev_priv(netdev); unsigned long flags; int retval; if (!netif_running(netdev)) return -EINVAL; spin_lock_irqsave(&adapter->lock, flags); retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); spin_unlock_irqrestore(&adapter->lock, flags); return retval; } /** * atl1_setup_ring_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_ring_header *ring_header = &adapter->ring_header; struct pci_dev *pdev = adapter->pdev; int size; u8 offset = 0; size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); if (unlikely(!tpd_ring->buffer_info)) { if (netif_msg_drv(adapter)) dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size); goto err_nomem; } rfd_ring->buffer_info = (tpd_ring->buffer_info + tpd_ring->count); /* * real ring DMA buffer * each ring/block may need up to 8 bytes for alignment, hence the * additional 40 bytes tacked onto the end. */ ring_header->size = sizeof(struct tx_packet_desc) * tpd_ring->count + sizeof(struct rx_free_desc) * rfd_ring->count + sizeof(struct rx_return_desc) * rrd_ring->count + sizeof(struct coals_msg_block) + sizeof(struct stats_msg_block) + 40; ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size, &ring_header->dma, GFP_KERNEL); if (unlikely(!ring_header->desc)) { if (netif_msg_drv(adapter)) dev_err(&pdev->dev, "dma_alloc_coherent failed\n"); goto err_nomem; } /* init TPD ring */ tpd_ring->dma = ring_header->dma; offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; tpd_ring->dma += offset; tpd_ring->desc = (u8 *) ring_header->desc + offset; tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; /* init RFD ring */ rfd_ring->dma = tpd_ring->dma + tpd_ring->size; offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; rfd_ring->dma += offset; rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; /* init RRD ring */ rrd_ring->dma = rfd_ring->dma + rfd_ring->size; offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; rrd_ring->dma += offset; rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; /* init CMB */ adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; adapter->cmb.dma += offset; adapter->cmb.cmb = (struct coals_msg_block *) ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); /* init SMB */ adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; adapter->smb.dma += offset; adapter->smb.smb = (struct stats_msg_block *) ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset)); return 0; err_nomem: kfree(tpd_ring->buffer_info); return -ENOMEM; } static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; atomic_set(&tpd_ring->next_to_use, 0); atomic_set(&tpd_ring->next_to_clean, 0); rfd_ring->next_to_clean = 0; atomic_set(&rfd_ring->next_to_use, 0); rrd_ring->next_to_use = 0; atomic_set(&rrd_ring->next_to_clean, 0); } /** * atl1_clean_rx_ring - Free RFD Buffers * @adapter: board private structure */ static void atl1_clean_rx_ring(struct atl1_adapter *adapter) { struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; /* Free all the Rx ring sk_buffs */ for (i = 0; i < rfd_ring->count; i++) { buffer_info = &rfd_ring->buffer_info[i]; if (buffer_info->dma) { dma_unmap_page(&pdev->dev, buffer_info->dma, buffer_info->length, DMA_FROM_DEVICE); buffer_info->dma = 0; } if (buffer_info->skb) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } } size = sizeof(struct atl1_buffer) * rfd_ring->count; memset(rfd_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ memset(rfd_ring->desc, 0, rfd_ring->size); rfd_ring->next_to_clean = 0; atomic_set(&rfd_ring->next_to_use, 0); rrd_ring->next_to_use = 0; atomic_set(&rrd_ring->next_to_clean, 0); } /** * atl1_clean_tx_ring - Free Tx Buffers * @adapter: board private structure */ static void atl1_clean_tx_ring(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; /* Free all the Tx ring sk_buffs */ for (i = 0; i < tpd_ring->count; i++) { buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->dma) { dma_unmap_page(&pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); buffer_info->dma = 0; } } for (i = 0; i < tpd_ring->count; i++) { buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->skb) { dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = NULL; } } size = sizeof(struct atl1_buffer) * tpd_ring->count; memset(tpd_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ memset(tpd_ring->desc, 0, tpd_ring->size); atomic_set(&tpd_ring->next_to_use, 0); atomic_set(&tpd_ring->next_to_clean, 0); } /** * atl1_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * * Free all transmit software resources */ static void atl1_free_ring_resources(struct atl1_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_ring_header *ring_header = &adapter->ring_header; atl1_clean_tx_ring(adapter); atl1_clean_rx_ring(adapter); kfree(tpd_ring->buffer_info); dma_free_coherent(&pdev->dev, ring_header->size, ring_header->desc, ring_header->dma); tpd_ring->buffer_info = NULL; tpd_ring->desc = NULL; tpd_ring->dma = 0; rfd_ring->buffer_info = NULL; rfd_ring->desc = NULL; rfd_ring->dma = 0; rrd_ring->desc = NULL; rrd_ring->dma = 0; adapter->cmb.dma = 0; adapter->cmb.cmb = NULL; 
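	/* The CMB and SMB blocks live inside the single ring_header coherent
	 * allocation released above, so only the cached pointers and DMA
	 * handles need resetting here. */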
adapter->smb.dma = 0; adapter->smb.smb = NULL; } static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) { u32 value; struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; /* Config MAC CTRL Register */ value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; /* duplex */ if (FULL_DUPLEX == adapter->link_duplex) value |= MAC_CTRL_DUPLX; /* speed */ value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); /* flow control */ value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); /* PAD & CRC */ value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); /* preamble length */ value |= (((u32) adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); /* vlan */ __atlx_vlan_mode(netdev->features, &value); /* rx checksum if (adapter->rx_csum) value |= MAC_CTRL_RX_CHKSUM_EN; */ /* filter mode */ value |= MAC_CTRL_BC_EN; if (netdev->flags & IFF_PROMISC) value |= MAC_CTRL_PROMIS_EN; else if (netdev->flags & IFF_ALLMULTI) value |= MAC_CTRL_MC_ALL_EN; /* value |= MAC_CTRL_LOOPBACK; */ iowrite32(value, hw->hw_addr + REG_MAC_CTRL); } static u32 atl1_check_link(struct atl1_adapter *adapter) { struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; u32 ret_val; u16 speed, duplex, phy_data; int reconfig = 0; /* MII_BMSR must read twice */ atl1_read_phy_reg(hw, MII_BMSR, &phy_data); atl1_read_phy_reg(hw, MII_BMSR, &phy_data); if (!(phy_data & BMSR_LSTATUS)) { /* link down */ if (netif_carrier_ok(netdev)) { /* old link state: Up */ if (netif_msg_link(adapter)) dev_info(&adapter->pdev->dev, "link is down\n"); adapter->link_speed = SPEED_0; netif_carrier_off(netdev); } return 0; } /* Link Up */ ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); if (ret_val) return ret_val; switch (hw->media_type) { case MEDIA_TYPE_1000M_FULL: if (speed != SPEED_1000 || duplex != FULL_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_100M_FULL: if (speed != SPEED_100 || duplex != FULL_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_100M_HALF: if (speed != SPEED_100 || duplex != HALF_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_10M_FULL: if (speed != SPEED_10 || duplex != FULL_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_10M_HALF: if (speed != SPEED_10 || duplex != HALF_DUPLEX) reconfig = 1; break; } /* link result is our setting */ if (!reconfig) { if (adapter->link_speed != speed || adapter->link_duplex != duplex) { adapter->link_speed = speed; adapter->link_duplex = duplex; atl1_setup_mac_ctrl(adapter); if (netif_msg_link(adapter)) dev_info(&adapter->pdev->dev, "%s link is up %d Mbps %s\n", netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? 
"full duplex" : "half duplex"); } if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ netif_carrier_on(netdev); } return 0; } /* change original link status */ if (netif_carrier_ok(netdev)) { adapter->link_speed = SPEED_0; netif_carrier_off(netdev); netif_stop_queue(netdev); } if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && hw->media_type != MEDIA_TYPE_1000M_FULL) { switch (hw->media_type) { case MEDIA_TYPE_100M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_100M_HALF: phy_data = MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_10M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; break; default: /* MEDIA_TYPE_10M_HALF: */ phy_data = MII_CR_SPEED_10 | MII_CR_RESET; break; } atl1_write_phy_reg(hw, MII_BMCR, phy_data); return 0; } /* auto-neg, insert timer to re-config phy */ if (!adapter->phy_timer_pending) { adapter->phy_timer_pending = true; mod_timer(&adapter->phy_config_timer, round_jiffies(jiffies + 3 * HZ)); } return 0; } static void set_flow_ctrl_old(struct atl1_adapter *adapter) { u32 hi, lo, value; /* RFD Flow Control */ value = adapter->rfd_ring.count; hi = value / 16; if (hi < 2) hi = 2; lo = value * 7 / 8; value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); /* RRD Flow Control */ value = adapter->rrd_ring.count; lo = value / 16; hi = value * 7 / 8; if (lo < 2) lo = 2; value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); } static void set_flow_ctrl_new(struct atl1_hw *hw) { u32 hi, lo, value; /* RXF Flow Control */ value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); lo = value / 16; if (lo < 192) lo = 192; hi = value * 7 / 8; if (hi < lo) hi = lo + 16; value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); /* RRD Flow Control */ value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); lo = value / 8; hi = value * 7 / 8; if (lo < 2) lo = 2; if (hi < lo) hi = lo + 3; value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); } /** * atl1_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * * Configure the Tx /Rx unit of the MAC after a reset. 
*/ static u32 atl1_configure(struct atl1_adapter *adapter) { struct atl1_hw *hw = &adapter->hw; u32 value; /* clear interrupt status */ iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); /* set MAC Address */ value = (((u32) hw->mac_addr[2]) << 24) | (((u32) hw->mac_addr[3]) << 16) | (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); /* tx / rx ring */ /* HI base address */ iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), hw->hw_addr + REG_DESC_BASE_ADDR_HI); /* LO base address */ iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), hw->hw_addr + REG_DESC_RFD_ADDR_LO); iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), hw->hw_addr + REG_DESC_RRD_ADDR_LO); iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), hw->hw_addr + REG_DESC_TPD_ADDR_LO); iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), hw->hw_addr + REG_DESC_CMB_ADDR_LO); iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), hw->hw_addr + REG_DESC_SMB_ADDR_LO); /* element count */ value = adapter->rrd_ring.count; value <<= 16; value += adapter->rfd_ring.count; iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE); /* Load Ptr */ iowrite32(1, hw->hw_addr + REG_LOAD_PTR); /* config Mailbox */ value = ((atomic_read(&adapter->tpd_ring.next_to_use) & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | ((atomic_read(&adapter->rrd_ring.next_to_clean) & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | ((atomic_read(&adapter->rfd_ring.next_to_use) & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); iowrite32(value, hw->hw_addr + REG_MAILBOX); /* config IPG/IFG */ value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) << MAC_IPG_IFG_IPGT_SHIFT) | (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) << MAC_IPG_IFG_MIFG_SHIFT) | (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) << MAC_IPG_IFG_IPGR1_SHIFT) | (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) << MAC_IPG_IFG_IPGR2_SHIFT); iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); /* config Half-Duplex Control */ value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); /* set Interrupt Moderator Timer */ iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); /* set Interrupt Clear Timer */ iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); /* set max frame size hw will accept */ iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); /* jumbo size & rrd retirement timer */ value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << RXQ_JMBOSZ_TH_SHIFT) | (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) << RXQ_JMBO_LKAH_SHIFT) | (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) << RXQ_RRD_TIMER_SHIFT); iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); /* Flow Control */ switch (hw->dev_rev) { case 0x8001: case 0x9001: case 0x9002: case 0x9003: set_flow_ctrl_old(adapter); break; default: set_flow_ctrl_new(hw); break; } /* config TXQ */ value = (((u32) hw->tpd_burst & 
TXQ_CTRL_TPD_BURST_NUM_MASK) << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN; iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) << TX_JUMBO_TASK_TH_SHIFT) | (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) << TX_TPD_MIN_IPG_SHIFT); iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); /* config RXQ */ value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); /* config DMA Engine */ value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN; value |= (u32) hw->dma_ord; if (atl1_rcb_128 == hw->rcb_value) value |= DMA_CTRL_RCB_VALUE; iowrite32(value, hw->hw_addr + REG_DMA_CTRL); /* config CMB / SMB */ value = (hw->cmb_tpd > adapter->tpd_ring.count) ? hw->cmb_tpd : adapter->tpd_ring.count; value <<= 16; value |= hw->cmb_rrd; iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); /* --- enable CMB / SMB */ value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); value = ioread32(adapter->hw.hw_addr + REG_ISR); if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) value = 1; /* config failed */ else value = 0; /* clear all interrupt status */ iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); iowrite32(0, adapter->hw.hw_addr + REG_ISR); return value; } /* * atl1_pcie_patch - Patch for PCIE module */ static void atl1_pcie_patch(struct atl1_adapter *adapter) { u32 value; /* much vendor magic here */ value = 0x6500; iowrite32(value, adapter->hw.hw_addr + 0x12FC); /* pcie flow control mode change */ value = ioread32(adapter->hw.hw_addr + 0x1008); value |= 0x8000; iowrite32(value, adapter->hw.hw_addr + 0x1008); } /* * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 * on PCI Command register is disable. * The function enable this bit. 
* Brackett, 2006/03/15 */ static void atl1_via_workaround(struct atl1_adapter *adapter) { unsigned long value; value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); if (value & PCI_COMMAND_INTX_DISABLE) value &= ~PCI_COMMAND_INTX_DISABLE; iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); } static void atl1_inc_smb(struct atl1_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct stats_msg_block *smb = adapter->smb.smb; u64 new_rx_errors = smb->rx_frag + smb->rx_fcs_err + smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + smb->rx_rrd_ov + smb->rx_align_err; u64 new_tx_errors = smb->tx_late_col + smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc; /* Fill out the OS statistics structure */ adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors; adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors; adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; adapter->soft_stats.multicast += smb->rx_mcast; adapter->soft_stats.collisions += smb->tx_1_col + smb->tx_2_col + smb->tx_late_col + smb->tx_abort_col; /* Rx Errors */ adapter->soft_stats.rx_errors += new_rx_errors; adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; adapter->soft_stats.rx_length_errors += smb->rx_len_err; adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; adapter->soft_stats.rx_frame_errors += smb->rx_align_err; adapter->soft_stats.rx_pause += smb->rx_pause; adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; adapter->soft_stats.rx_trunc += smb->rx_sz_ov; /* Tx Errors */ adapter->soft_stats.tx_errors += new_tx_errors; adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; adapter->soft_stats.tx_window_errors += smb->tx_late_col; adapter->soft_stats.excecol += smb->tx_abort_col; adapter->soft_stats.deffer += smb->tx_defer; adapter->soft_stats.scc += smb->tx_1_col; adapter->soft_stats.mcc += smb->tx_2_col; adapter->soft_stats.latecol += smb->tx_late_col; adapter->soft_stats.tx_underrun += smb->tx_underrun; adapter->soft_stats.tx_trunc += smb->tx_trunc; adapter->soft_stats.tx_pause += smb->tx_pause; netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes; netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes; netdev->stats.multicast = adapter->soft_stats.multicast; netdev->stats.collisions = adapter->soft_stats.collisions; netdev->stats.rx_errors = adapter->soft_stats.rx_errors; netdev->stats.rx_length_errors = adapter->soft_stats.rx_length_errors; netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; netdev->stats.rx_frame_errors = adapter->soft_stats.rx_frame_errors; netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov; netdev->stats.tx_errors = adapter->soft_stats.tx_errors; netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; netdev->stats.tx_aborted_errors = adapter->soft_stats.tx_aborted_errors; netdev->stats.tx_window_errors = adapter->soft_stats.tx_window_errors; netdev->stats.tx_carrier_errors = adapter->soft_stats.tx_carrier_errors; netdev->stats.rx_packets = adapter->soft_stats.rx_packets; netdev->stats.tx_packets = adapter->soft_stats.tx_packets; } static void atl1_update_mailbox(struct atl1_adapter *adapter) { unsigned long flags; u32 tpd_next_to_use; u32 rfd_next_to_use; u32 rrd_next_to_clean; u32 value; spin_lock_irqsave(&adapter->mb_lock, flags); tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); 
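	/* The TPD producer, RFD producer and RRD consumer indices read here are
	 * packed into a single REG_MAILBOX write below, under mb_lock, so the
	 * hardware sees a consistent snapshot of all three ring positions.
	 */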
rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT) | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT); iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); spin_unlock_irqrestore(&adapter->mb_lock, flags); } static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, struct rx_return_desc *rrd, u16 offset) { struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; if (++rfd_ring->next_to_clean == rfd_ring->count) { rfd_ring->next_to_clean = 0; } } } static void atl1_update_rfd_index(struct atl1_adapter *adapter, struct rx_return_desc *rrd) { u16 num_buf; num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / adapter->rx_buffer_len; if (rrd->num_buf == num_buf) /* clean alloc flag for bad rrd */ atl1_clean_alloc_flag(adapter, rrd, num_buf); } static void atl1_rx_checksum(struct atl1_adapter *adapter, struct rx_return_desc *rrd, struct sk_buff *skb) { struct pci_dev *pdev = adapter->pdev; /* * The L1 hardware contains a bug that erroneously sets the * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a * fragmented IP packet is received, even though the packet * is perfectly valid and its checksum is correct. There's * no way to distinguish between one of these good packets * and a packet that actually contains a TCP/UDP checksum * error, so all we can do is allow it to be handed up to * the higher layers and let it be sorted out there. */ skb_checksum_none_assert(skb); if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | ERR_FLAG_CODE | ERR_FLAG_OV)) { adapter->hw_csum_err++; if (netif_msg_rx_err(adapter)) dev_printk(KERN_DEBUG, &pdev->dev, "rx checksum error\n"); return; } } /* not IPv4 */ if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) /* checksum is invalid, but it's not an IPv4 pkt, so ok */ return; /* IPv4 packet */ if (likely(!(rrd->err_flg & (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { skb->ip_summed = CHECKSUM_UNNECESSARY; adapter->hw_csum_good++; return; } } /** * atl1_alloc_rx_buffers - Replace used receive buffers * @adapter: address of board private structure */ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) { struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct pci_dev *pdev = adapter->pdev; struct page *page; unsigned long offset; struct atl1_buffer *buffer_info, *next_info; struct sk_buff *skb; u16 num_alloc = 0; u16 rfd_next_to_use, next_next; struct rx_free_desc *rfd_desc; next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); if (++next_next == rfd_ring->count) next_next = 0; buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; next_info = &rfd_ring->buffer_info[next_next]; while (!buffer_info->alloced && !next_info->alloced) { if (buffer_info->skb) { buffer_info->alloced = 1; goto next; } rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); skb = netdev_alloc_skb_ip_align(adapter->netdev, adapter->rx_buffer_len); if (unlikely(!skb)) { /* Better luck next round */ adapter->soft_stats.rx_dropped++; break; } buffer_info->alloced = 1; buffer_info->skb = skb; buffer_info->length = (u16) adapter->rx_buffer_len; page = virt_to_page(skb->data); offset = offset_in_page(skb->data); buffer_info->dma = dma_map_page(&pdev->dev, page, offset, adapter->rx_buffer_len, DMA_FROM_DEVICE); 
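		/* Publish the freshly mapped buffer to hardware by filling the free
		 * descriptor below with its DMA address and length.
		 */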
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); rfd_desc->coalese = 0; next: rfd_next_to_use = next_next; if (unlikely(++next_next == rfd_ring->count)) next_next = 0; buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; next_info = &rfd_ring->buffer_info[next_next]; num_alloc++; } if (num_alloc) { /* * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); } return num_alloc; } static int atl1_intr_rx(struct atl1_adapter *adapter, int budget) { int i, count; u16 length; u16 rrd_next_to_clean; u32 value; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_buffer *buffer_info; struct rx_return_desc *rrd; struct sk_buff *skb; count = 0; rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); while (count < budget) { rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); i = 1; if (likely(rrd->xsz.valid)) { /* packet valid */ chk_rrd: /* check rrd status */ if (likely(rrd->num_buf == 1)) goto rrd_ok; else if (netif_msg_rx_err(adapter)) { dev_printk(KERN_DEBUG, &adapter->pdev->dev, "unexpected RRD buffer count\n"); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "rx_buf_len = %d\n", adapter->rx_buffer_len); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD num_buf = %d\n", rrd->num_buf); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD pkt_len = %d\n", rrd->xsz.xsum_sz.pkt_size); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD pkt_flg = 0x%08X\n", rrd->pkt_flg); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD err_flg = 0x%08X\n", rrd->err_flg); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD vlan_tag = 0x%08X\n", rrd->vlan_tag); } /* rrd seems to be bad */ if (unlikely(i-- > 0)) { /* rrd may not be DMAed completely */ udelay(1); goto chk_rrd; } /* bad rrd */ if (netif_msg_rx_err(adapter)) dev_printk(KERN_DEBUG, &adapter->pdev->dev, "bad RRD\n"); /* see if update RFD index */ if (rrd->num_buf > 1) atl1_update_rfd_index(adapter, rrd); /* update rrd */ rrd->xsz.valid = 0; if (++rrd_next_to_clean == rrd_ring->count) rrd_next_to_clean = 0; count++; continue; } else { /* current rrd still not be updated */ break; } rrd_ok: /* clean alloc flag for bad rrd */ atl1_clean_alloc_flag(adapter, rrd, 0); buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; if (++rfd_ring->next_to_clean == rfd_ring->count) rfd_ring->next_to_clean = 0; /* update rrd next to clean */ if (++rrd_next_to_clean == rrd_ring->count) rrd_next_to_clean = 0; count++; if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { if (!(rrd->err_flg & (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM | ERR_FLAG_LEN))) { /* packet error, don't need upstream */ buffer_info->alloced = 0; rrd->xsz.valid = 0; continue; } } /* Good Receive */ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, buffer_info->length, DMA_FROM_DEVICE); buffer_info->dma = 0; skb = buffer_info->skb; length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); skb_put(skb, length - ETH_FCS_LEN); /* Receive Checksum Offload */ atl1_rx_checksum(adapter, rrd, skb); skb->protocol = eth_type_trans(skb, adapter->netdev); if (rrd->pkt_flg & PACKET_FLAG_VLAN_INS) { u16 vlan_tag = (rrd->vlan_tag >> 4) | ((rrd->vlan_tag & 7) << 13) | ((rrd->vlan_tag & 8) << 9); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } netif_receive_skb(skb); /* let protocol layer free skb */ buffer_info->skb = NULL; 
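		/* The skb was handed to the network stack above; the RFD slot and the
		 * RRD entry are marked reusable below before the loop advances.
		 */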
buffer_info->alloced = 0; rrd->xsz.valid = 0; } atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); atl1_alloc_rx_buffers(adapter); /* update mailbox ? */ if (count) { u32 tpd_next_to_use; u32 rfd_next_to_use; spin_lock(&adapter->mb_lock); tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT) | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT); iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); spin_unlock(&adapter->mb_lock); } return count; } static int atl1_intr_tx(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; u16 sw_tpd_next_to_clean; u16 cmb_tpd_next_to_clean; int count = 0; sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; if (buffer_info->dma) { dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, buffer_info->length, DMA_TO_DEVICE); buffer_info->dma = 0; } if (buffer_info->skb) { dev_consume_skb_irq(buffer_info->skb); buffer_info->skb = NULL; } if (++sw_tpd_next_to_clean == tpd_ring->count) sw_tpd_next_to_clean = 0; count++; } atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); if (netif_queue_stopped(adapter->netdev) && netif_carrier_ok(adapter->netdev)) netif_wake_queue(adapter->netdev); return count; } static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) { u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); u16 next_to_use = atomic_read(&tpd_ring->next_to_use); return (next_to_clean > next_to_use) ? 
next_to_clean - next_to_use - 1 : tpd_ring->count + next_to_clean - next_to_use - 1; } static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, struct tx_packet_desc *ptpd) { u8 hdr_len, ip_off; u32 real_len; if (skb_shinfo(skb)->gso_size) { int err; err = skb_cow_head(skb, 0); if (err < 0) return err; if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); real_len = (((unsigned char *)iph - skb->data) + ntohs(iph->tot_len)); if (real_len < skb->len) { err = pskb_trim(skb, real_len); if (err) return err; } hdr_len = skb_tcp_all_headers(skb); if (skb->len == hdr_len) { iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, tcp_hdrlen(skb), IPPROTO_TCP, 0); ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << TPD_IPHL_SHIFT; ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT; ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT; ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT; return 1; } iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); ip_off = (unsigned char *)iph - (unsigned char *) skb_network_header(skb); if (ip_off == 8) /* 802.3-SNAP frame */ ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; else if (ip_off != 0) return -2; ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << TPD_IPHL_SHIFT; ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT; ptpd->word3 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) << TPD_MSS_SHIFT; ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; return 3; } } return 0; } static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, struct tx_packet_desc *ptpd) { u8 css, cso; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { css = skb_checksum_start_offset(skb); cso = css + (u8) skb->csum_offset; if (unlikely(css & 0x1)) { /* L1 hardware requires an even number here */ if (netif_msg_tx_err(adapter)) dev_printk(KERN_DEBUG, &adapter->pdev->dev, "payload offset not an even number\n"); return -1; } ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) << TPD_PLOADOFFSET_SHIFT; ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) << TPD_CCSUMOFFSET_SHIFT; ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT; return true; } return 0; } static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, struct tx_packet_desc *ptpd) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; u16 buf_len = skb->len; struct page *page; unsigned long offset; unsigned int nr_frags; unsigned int f; int retval; u16 next_to_use; u16 data_len; u8 hdr_len; buf_len -= skb->data_len; nr_frags = skb_shinfo(skb)->nr_frags; next_to_use = atomic_read(&tpd_ring->next_to_use); buffer_info = &tpd_ring->buffer_info[next_to_use]; BUG_ON(buffer_info->skb); /* put skb in last TPD */ buffer_info->skb = NULL; retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; if (retval) { /* TSO */ hdr_len = skb_tcp_all_headers(skb); buffer_info->length = hdr_len; page = virt_to_page(skb->data); offset = offset_in_page(skb->data); buffer_info->dma = dma_map_page(&adapter->pdev->dev, page, offset, hdr_len, DMA_TO_DEVICE); if (++next_to_use == tpd_ring->count) next_to_use = 0; if (buf_len > hdr_len) { int i, nseg; data_len = buf_len - hdr_len; nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; for (i = 0; i < nseg; i++) { buffer_info = &tpd_ring->buffer_info[next_to_use]; buffer_info->skb = NULL; buffer_info->length = (ATL1_MAX_TX_BUF_LEN >= data_len) ? 
ATL1_MAX_TX_BUF_LEN : data_len; data_len -= buffer_info->length; page = virt_to_page(skb->data + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); offset = offset_in_page(skb->data + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); buffer_info->dma = dma_map_page(&adapter->pdev->dev, page, offset, buffer_info->length, DMA_TO_DEVICE); if (++next_to_use == tpd_ring->count) next_to_use = 0; } } } else { /* not TSO */ buffer_info->length = buf_len; page = virt_to_page(skb->data); offset = offset_in_page(skb->data); buffer_info->dma = dma_map_page(&adapter->pdev->dev, page, offset, buf_len, DMA_TO_DEVICE); if (++next_to_use == tpd_ring->count) next_to_use = 0; } for (f = 0; f < nr_frags; f++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; u16 i, nseg; buf_len = skb_frag_size(frag); nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; for (i = 0; i < nseg; i++) { buffer_info = &tpd_ring->buffer_info[next_to_use]; BUG_ON(buffer_info->skb); buffer_info->skb = NULL; buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ? ATL1_MAX_TX_BUF_LEN : buf_len; buf_len -= buffer_info->length; buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, frag, i * ATL1_MAX_TX_BUF_LEN, buffer_info->length, DMA_TO_DEVICE); if (++next_to_use == tpd_ring->count) next_to_use = 0; } } /* last tpd's buffer-info */ buffer_info->skb = skb; } static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count, struct tx_packet_desc *ptpd) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; struct tx_packet_desc *tpd; u16 j; u32 val; u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use); for (j = 0; j < count; j++) { buffer_info = &tpd_ring->buffer_info[next_to_use]; tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use); if (tpd != ptpd) memcpy(tpd, ptpd, sizeof(struct tx_packet_desc)); tpd->buffer_addr = cpu_to_le64(buffer_info->dma); tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT); tpd->word2 |= (cpu_to_le16(buffer_info->length) & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT; /* * if this is the first packet in a TSO chain, set * TPD_HDRFLAG, otherwise, clear it. */ val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; if (val) { if (!j) tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; else tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT); } if (j == (count - 1)) tpd->word3 |= 1 << TPD_EOP_SHIFT; if (++next_to_use == tpd_ring->count) next_to_use = 0; } /* * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); atomic_set(&tpd_ring->next_to_use, next_to_use); } static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; int len; int tso; int count = 1; int ret_val; struct tx_packet_desc *ptpd; u16 vlan_tag; unsigned int nr_frags = 0; unsigned int mss = 0; unsigned int f; unsigned int proto_hdr_len; len = skb_headlen(skb); if (unlikely(skb->len <= 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) { unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; } mss = skb_shinfo(skb)->gso_size; if (mss) { if (skb->protocol == htons(ETH_P_IP)) { proto_hdr_len = skb_tcp_all_headers(skb); if (unlikely(proto_hdr_len > len)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* need additional TPD ? 
*/ if (proto_hdr_len != len) count += (len - proto_hdr_len + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; } } if (atl1_tpd_avail(&adapter->tpd_ring) < count) { /* not enough descriptors */ netif_stop_queue(netdev); if (netif_msg_tx_queued(adapter)) dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); return NETDEV_TX_BUSY; } ptpd = ATL1_TPD_DESC(tpd_ring, (u16) atomic_read(&tpd_ring->next_to_use)); memset(ptpd, 0, sizeof(struct tx_packet_desc)); if (skb_vlan_tag_present(skb)) { vlan_tag = skb_vlan_tag_get(skb); vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | ((vlan_tag >> 9) & 0x8); ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) << TPD_VLANTAG_SHIFT; } tso = atl1_tso(adapter, skb, ptpd); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (!tso) { ret_val = atl1_tx_csum(adapter, skb, ptpd); if (ret_val < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } } atl1_tx_map(adapter, skb, ptpd); atl1_tx_queue(adapter, count, ptpd); atl1_update_mailbox(adapter); return NETDEV_TX_OK; } static int atl1_rings_clean(struct napi_struct *napi, int budget) { struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi); int work_done = atl1_intr_rx(adapter, budget); if (atl1_intr_tx(adapter)) work_done = budget; /* Let's come again to process some more packets */ if (work_done >= budget) return work_done; napi_complete_done(napi, work_done); /* re-enable Interrupt */ if (likely(adapter->int_enabled)) atlx_imr_set(adapter, IMR_NORMAL_MASK); return work_done; } static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter) { if (!napi_schedule_prep(&adapter->napi)) /* It is possible in case even the RX/TX ints are disabled via IMR * register the ISR bits are set anyway (but do not produce IRQ). * To handle such situation the napi functions used to check is * something scheduled or not. */ return 0; __napi_schedule(&adapter->napi); /* * Disable RX/TX ints via IMR register if it is * allowed. NAPI handler must reenable them in same * way. */ if (!adapter->int_enabled) return 1; atlx_imr_set(adapter, IMR_NORXTX_MASK); return 1; } /** * atl1_intr - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure */ static irqreturn_t atl1_intr(int irq, void *data) { struct atl1_adapter *adapter = netdev_priv(data); u32 status; status = adapter->cmb.cmb->int_stats; if (!status) return IRQ_NONE; /* clear CMB interrupt status at once, * but leave rx/tx interrupt status in case it should be dropped * only if rx/tx processing queued. In other case interrupt * can be lost. */ adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX); if (status & ISR_GPHY) /* clear phy status */ atlx_clear_phy_int(adapter); /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); /* check if SMB intr */ if (status & ISR_SMB) atl1_inc_smb(adapter); /* check if PCIE PHY Link down */ if (status & ISR_PHY_LINKDOWN) { if (netif_msg_intr(adapter)) dev_printk(KERN_DEBUG, &adapter->pdev->dev, "pcie phy link down %x\n", status); if (netif_running(adapter->netdev)) { /* reset MAC */ atlx_irq_disable(adapter); schedule_work(&adapter->reset_dev_task); return IRQ_HANDLED; } } /* check if DMA read/write error ? 
*/ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { if (netif_msg_intr(adapter)) dev_printk(KERN_DEBUG, &adapter->pdev->dev, "pcie DMA r/w error (status = 0x%x)\n", status); atlx_irq_disable(adapter); schedule_work(&adapter->reset_dev_task); return IRQ_HANDLED; } /* link event */ if (status & ISR_GPHY) { adapter->soft_stats.tx_carrier_errors++; atl1_check_for_link(adapter); } /* transmit or receive event */ if (status & (ISR_CMB_TX | ISR_CMB_RX) && atl1_sched_rings_clean(adapter)) adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats & ~(ISR_CMB_TX | ISR_CMB_RX); /* rx exception */ if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | ISR_HOST_RRD_OV))) { if (netif_msg_intr(adapter)) dev_printk(KERN_DEBUG, &adapter->pdev->dev, "rx exception, ISR = 0x%x\n", status); atl1_sched_rings_clean(adapter); } /* re-enable Interrupt */ iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); return IRQ_HANDLED; } /** * atl1_phy_config - Timer Call-back * @t: timer_list containing pointer to netdev cast into an unsigned long */ static void atl1_phy_config(struct timer_list *t) { struct atl1_adapter *adapter = from_timer(adapter, t, phy_config_timer); struct atl1_hw *hw = &adapter->hw; unsigned long flags; spin_lock_irqsave(&adapter->lock, flags); adapter->phy_timer_pending = false; atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg); atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); spin_unlock_irqrestore(&adapter->lock, flags); } /* * Orphaned vendor comment left intact here: * <vendor comment> * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT * will assert. We do soft reset <0x1400=1> according * with the SPEC. BUT, it seemes that PCIE or DMA * state-machine will not be reset. DMAR_TO_INT will * assert again and again. 
* </vendor comment> */ static int atl1_reset(struct atl1_adapter *adapter) { int ret; ret = atl1_reset_hw(&adapter->hw); if (ret) return ret; return atl1_init_hw(&adapter->hw); } static s32 atl1_up(struct atl1_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; int irq_flags = 0; /* hardware has been reset, we need to reload some things */ atlx_set_multi(netdev); atl1_init_ring_ptrs(adapter); atlx_restore_vlan(adapter); err = atl1_alloc_rx_buffers(adapter); if (unlikely(!err)) /* no RX BUFFER allocated */ return -ENOMEM; if (unlikely(atl1_configure(adapter))) { err = -EIO; goto err_up; } err = pci_enable_msi(adapter->pdev); if (err) { if (netif_msg_ifup(adapter)) dev_info(&adapter->pdev->dev, "Unable to enable MSI: %d\n", err); irq_flags |= IRQF_SHARED; } err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags, netdev->name, netdev); if (unlikely(err)) goto err_up; napi_enable(&adapter->napi); atlx_irq_enable(adapter); atl1_check_link(adapter); netif_start_queue(netdev); return 0; err_up: pci_disable_msi(adapter->pdev); /* free rx_buffers */ atl1_clean_rx_ring(adapter); return err; } static void atl1_down(struct atl1_adapter *adapter) { struct net_device *netdev = adapter->netdev; napi_disable(&adapter->napi); netif_stop_queue(netdev); del_timer_sync(&adapter->phy_config_timer); adapter->phy_timer_pending = false; atlx_irq_disable(adapter); free_irq(adapter->pdev->irq, netdev); pci_disable_msi(adapter->pdev); atl1_reset_hw(&adapter->hw); adapter->cmb.cmb->int_stats = 0; adapter->link_speed = SPEED_0; adapter->link_duplex = -1; netif_carrier_off(netdev); atl1_clean_tx_ring(adapter); atl1_clean_rx_ring(adapter); } static void atl1_reset_dev_task(struct work_struct *work) { struct atl1_adapter *adapter = container_of(work, struct atl1_adapter, reset_dev_task); struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); atl1_down(adapter); atl1_up(adapter); netif_device_attach(netdev); } /** * atl1_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure */ static int atl1_change_mtu(struct net_device *netdev, int new_mtu) { struct atl1_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; adapter->hw.max_frame_size = max_frame; adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; adapter->rx_buffer_len = (max_frame + 7) & ~7; adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; netdev->mtu = new_mtu; if (netif_running(netdev)) { atl1_down(adapter); atl1_up(adapter); } return 0; } /** * atl1_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. 
*/ static int atl1_open(struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); int err; netif_carrier_off(netdev); /* allocate transmit descriptors */ err = atl1_setup_ring_resources(adapter); if (err) return err; err = atl1_up(adapter); if (err) goto err_up; return 0; err_up: atl1_reset(adapter); return err; } /** * atl1_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. */ static int atl1_close(struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); atl1_down(adapter); atl1_free_ring_resources(adapter); return 0; } #ifdef CONFIG_PM_SLEEP static int atl1_suspend(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; u32 ctrl = 0; u32 wufc = adapter->wol; u32 val; u16 speed; u16 duplex; netif_device_detach(netdev); if (netif_running(netdev)) atl1_down(adapter); atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); val = ctrl & BMSR_LSTATUS; if (val) wufc &= ~ATLX_WUFC_LNKC; if (!wufc) goto disable_wol; if (val) { val = atl1_get_speed_and_duplex(hw, &speed, &duplex); if (val) { if (netif_msg_ifdown(adapter)) dev_printk(KERN_DEBUG, dev, "error getting speed/duplex\n"); goto disable_wol; } ctrl = 0; /* enable magic packet WOL */ if (wufc & ATLX_WUFC_MAG) ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); /* configure the mac */ ctrl = MAC_CTRL_RX_EN; ctrl |= ((u32)((speed == SPEED_1000) ? 
MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); if (duplex == FULL_DUPLEX) ctrl |= MAC_CTRL_DUPLX; ctrl |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); __atlx_vlan_mode(netdev->features, &ctrl); if (wufc & ATLX_WUFC_MAG) ctrl |= MAC_CTRL_BC_EN; iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); ioread32(hw->hw_addr + REG_MAC_CTRL); /* poke the PHY */ ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); ioread32(hw->hw_addr + REG_PCIE_PHYMISC); } else { ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); iowrite32(0, hw->hw_addr + REG_MAC_CTRL); ioread32(hw->hw_addr + REG_MAC_CTRL); hw->phy_configured = false; } return 0; disable_wol: iowrite32(0, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); ioread32(hw->hw_addr + REG_PCIE_PHYMISC); hw->phy_configured = false; return 0; } static int atl1_resume(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct atl1_adapter *adapter = netdev_priv(netdev); iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); atl1_reset_hw(&adapter->hw); if (netif_running(netdev)) { adapter->cmb.cmb->int_stats = 0; atl1_up(adapter); } netif_device_attach(netdev); return 0; } #endif static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume); static void atl1_shutdown(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter = netdev_priv(netdev); #ifdef CONFIG_PM_SLEEP atl1_suspend(&pdev->dev); #endif pci_wake_from_d3(pdev, adapter->wol); pci_set_power_state(pdev, PCI_D3hot); } #ifdef CONFIG_NET_POLL_CONTROLLER static void atl1_poll_controller(struct net_device *netdev) { disable_irq(netdev->irq); atl1_intr(netdev->irq, netdev); enable_irq(netdev->irq); } #endif static const struct net_device_ops atl1_netdev_ops = { .ndo_open = atl1_open, .ndo_stop = atl1_close, .ndo_start_xmit = atl1_xmit_frame, .ndo_set_rx_mode = atlx_set_multi, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = atl1_set_mac, .ndo_change_mtu = atl1_change_mtu, .ndo_fix_features = atlx_fix_features, .ndo_set_features = atlx_set_features, .ndo_eth_ioctl = atlx_ioctl, .ndo_tx_timeout = atlx_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl1_poll_controller, #endif }; /** * atl1_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl1_pci_tbl * * Returns 0 on success, negative on failure * * atl1_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. */ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl1_adapter *adapter; static int cards_found = 0; int err; err = pci_enable_device(pdev); if (err) return err; /* * The atl1 chip can DMA to 64-bit addresses, but it uses a single * shared register for the high 32 bits, so only a single, aligned, * 4 GB physical address range can be used at a time. * * Supporting 64-bit DMA on this hardware is more trouble than it's * worth. It is far easier to limit to 32-bit DMA than update * various kernel subsystems to support the mechanics required by a * fixed-high-32-bit system. 
*/ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto err_dma; } /* * Mark all PCI regions associated with PCI device * pdev as being reserved by owner atl1_driver_name */ err = pci_request_regions(pdev, ATLX_DRIVER_NAME); if (err) goto err_request_regions; /* * Enables bus-mastering on the device and calls * pcibios_set_master to do the needed arch specific settings */ pci_set_master(pdev); netdev = alloc_etherdev(sizeof(struct atl1_adapter)); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.back = adapter; adapter->msg_enable = netif_msg_init(debug, atl1_default_msg); adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); if (!adapter->hw.hw_addr) { err = -EIO; goto err_pci_iomap; } /* get device revision number */ adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + (REG_MASTER_CTRL + 2)); /* set default ring resource counts */ adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; adapter->tpd_ring.count = ATL1_DEFAULT_TPD; adapter->mii.dev = netdev; adapter->mii.mdio_read = mdio_read; adapter->mii.mdio_write = mdio_write; adapter->mii.phy_id_mask = 0x1f; adapter->mii.reg_num_mask = 0x1f; netdev->netdev_ops = &atl1_netdev_ops; netdev->watchdog_timeo = 5 * HZ; netif_napi_add(netdev, &adapter->napi, atl1_rings_clean); netdev->ethtool_ops = &atl1_ethtool_ops; adapter->bd_number = cards_found; /* setup the private structure */ err = atl1_sw_init(adapter); if (err) goto err_common; netdev->features = NETIF_F_HW_CSUM; netdev->features |= NETIF_F_SG; netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_RX; /* is this valid? 
see atl1_setup_mac_ctrl() */ netdev->features |= NETIF_F_RXCSUM; /* MTU range: 42 - 10218 */ netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN); netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); /* * patch for some L1 of old version, * the final version of L1 may not need these * patches */ /* atl1_pcie_patch(adapter); */ /* really reset GPHY core */ iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); /* * reset the controller to * put the device in a known good starting state */ if (atl1_reset_hw(&adapter->hw)) { err = -EIO; goto err_common; } /* copy the MAC address out of the EEPROM */ if (atl1_read_mac_addr(&adapter->hw)) { /* mark random mac */ netdev->addr_assign_type = NET_ADDR_RANDOM; } eth_hw_addr_set(netdev, adapter->hw.mac_addr); if (!is_valid_ether_addr(netdev->dev_addr)) { err = -EIO; goto err_common; } atl1_check_options(adapter); /* pre-init the MAC, and setup link */ err = atl1_init_hw(&adapter->hw); if (err) { err = -EIO; goto err_common; } atl1_pcie_patch(adapter); /* assume we have no link for now */ netif_carrier_off(netdev); timer_setup(&adapter->phy_config_timer, atl1_phy_config, 0); adapter->phy_timer_pending = false; INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task); INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); err = register_netdev(netdev); if (err) goto err_common; cards_found++; atl1_via_workaround(adapter); return 0; err_common: pci_iounmap(pdev, adapter->hw.hw_addr); err_pci_iomap: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_dma: err_request_regions: pci_disable_device(pdev); return err; } /** * atl1_remove - Device Removal Routine * @pdev: PCI device information struct * * atl1_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. */ static void atl1_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter; /* Device not available. Return. */ if (!netdev) return; adapter = netdev_priv(netdev); /* * Some atl1 boards lack persistent storage for their MAC, and get it * from the BIOS during POST. If we've been messing with the MAC * address, we need to save the permanent one. 
*/ if (!ether_addr_equal_unaligned(adapter->hw.mac_addr, adapter->hw.perm_mac_addr)) { memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN); atl1_set_mac_addr(&adapter->hw); } iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); unregister_netdev(netdev); pci_iounmap(pdev, adapter->hw.hw_addr); pci_release_regions(pdev); free_netdev(netdev); pci_disable_device(pdev); } static struct pci_driver atl1_driver = { .name = ATLX_DRIVER_NAME, .id_table = atl1_pci_tbl, .probe = atl1_probe, .remove = atl1_remove, .shutdown = atl1_shutdown, .driver.pm = &atl1_pm_ops, }; struct atl1_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; #define ATL1_STAT(m) \ sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m) static struct atl1_stats atl1_gstrings_stats[] = { {"rx_packets", ATL1_STAT(soft_stats.rx_packets)}, {"tx_packets", ATL1_STAT(soft_stats.tx_packets)}, {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)}, {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, {"multicast", ATL1_STAT(soft_stats.multicast)}, {"collisions", ATL1_STAT(soft_stats.collisions)}, {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)}, {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)}, {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)}, {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)}, {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)}, {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)}, {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)}, {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)}, {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)}, {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, {"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)}, {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)}, {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)} }; static void atl1_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct atl1_adapter *adapter = netdev_priv(netdev); int i; char *p; for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { p = (char *)adapter+atl1_gstrings_stats[i].stat_offset; data[i] = (atl1_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } } static int atl1_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(atl1_gstrings_stats); default: return -EOPNOTSUPP; } } static int atl1_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; u32 supported, advertising; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP); advertising = ADVERTISED_TP; if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) { advertising |= ADVERTISED_Autoneg; if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) { advertising |= ADVERTISED_Autoneg; advertising |= (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full); } else advertising |= (ADVERTISED_1000baseT_Full); } cmd->base.port = PORT_TP; cmd->base.phy_address = 0; if (netif_carrier_ok(adapter->netdev)) { u16 link_speed, link_duplex; atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); cmd->base.speed = link_speed; if (link_duplex == FULL_DUPLEX) cmd->base.duplex = DUPLEX_FULL; else cmd->base.duplex = DUPLEX_HALF; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) cmd->base.autoneg = AUTONEG_ENABLE; else cmd->base.autoneg = AUTONEG_DISABLE; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int atl1_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; u16 phy_data; int ret_val = 0; u16 old_media_type = hw->media_type; if (netif_running(adapter->netdev)) { if (netif_msg_link(adapter)) dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n"); atl1_down(adapter); } if (cmd->base.autoneg == AUTONEG_ENABLE) hw->media_type = MEDIA_TYPE_AUTO_SENSOR; else { u32 speed = cmd->base.speed; if (speed == SPEED_1000) { if (cmd->base.duplex != DUPLEX_FULL) { if (netif_msg_link(adapter)) dev_warn(&adapter->pdev->dev, "1000M half is invalid\n"); ret_val = -EINVAL; goto exit_sset; } hw->media_type = MEDIA_TYPE_1000M_FULL; } else if (speed == SPEED_100) { if (cmd->base.duplex == DUPLEX_FULL) hw->media_type = MEDIA_TYPE_100M_FULL; else hw->media_type = MEDIA_TYPE_100M_HALF; } else { if (cmd->base.duplex == DUPLEX_FULL) hw->media_type = MEDIA_TYPE_10M_FULL; else hw->media_type = MEDIA_TYPE_10M_HALF; } } if (atl1_phy_setup_autoneg_adv(hw)) { ret_val = -EINVAL; if (netif_msg_link(adapter)) dev_warn(&adapter->pdev->dev, "invalid ethtool speed/duplex setting\n"); goto exit_sset; } if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; else { switch (hw->media_type) { case MEDIA_TYPE_100M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_100M_HALF: phy_data = MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_10M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; break; default: /* MEDIA_TYPE_10M_HALF: */ phy_data = MII_CR_SPEED_10 | MII_CR_RESET; break; } } atl1_write_phy_reg(hw, 
MII_BMCR, phy_data); exit_sset: if (ret_val) hw->media_type = old_media_type; if (netif_running(adapter->netdev)) { if (netif_msg_link(adapter)) dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n"); atl1_up(adapter); } else if (!ret_val) { if (netif_msg_link(adapter)) dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n"); atl1_reset(adapter); } return ret_val; } static void atl1_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct atl1_adapter *adapter = netdev_priv(netdev); strscpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); } static void atl1_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_MAGIC; wol->wolopts = 0; if (adapter->wol & ATLX_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; } static int atl1_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_ARP | WAKE_MAGICSECURE)) return -EOPNOTSUPP; adapter->wol = 0; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= ATLX_WUFC_MAG; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static u32 atl1_get_msglevel(struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); return adapter->msg_enable; } static void atl1_set_msglevel(struct net_device *netdev, u32 value) { struct atl1_adapter *adapter = netdev_priv(netdev); adapter->msg_enable = value; } static int atl1_get_regs_len(struct net_device *netdev) { return ATL1_REG_COUNT * sizeof(u32); } static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; unsigned int i; u32 *regbuf = p; for (i = 0; i < ATL1_REG_COUNT; i++) { /* * This switch statement avoids reserved regions * of register space. */ switch (i) { case 6 ... 9: case 14: case 29 ... 31: case 34 ... 63: case 75 ... 127: case 136 ... 1023: case 1027 ... 1087: case 1091 ... 1151: case 1194 ... 1195: case 1200 ... 1201: case 1206 ... 1213: case 1216 ... 1279: case 1290 ... 1311: case 1323 ... 1343: case 1358 ... 1359: case 1368 ... 1375: case 1378 ... 1383: case 1388 ... 1391: case 1393 ... 1395: case 1402 ... 1403: case 1410 ... 1471: case 1522 ... 
1535: /* reserved region; don't read it */ regbuf[i] = 0; break; default: /* unreserved region */ regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32))); } } } static void atl1_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_tpd_ring *txdr = &adapter->tpd_ring; struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; ring->rx_max_pending = ATL1_MAX_RFD; ring->tx_max_pending = ATL1_MAX_TPD; ring->rx_pending = rxdr->count; ring->tx_pending = txdr->count; } static int atl1_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; struct atl1_tpd_ring tpd_old, tpd_new; struct atl1_rfd_ring rfd_old, rfd_new; struct atl1_rrd_ring rrd_old, rrd_new; struct atl1_ring_header rhdr_old, rhdr_new; struct atl1_smb smb; struct atl1_cmb cmb; int err; tpd_old = adapter->tpd_ring; rfd_old = adapter->rfd_ring; rrd_old = adapter->rrd_ring; rhdr_old = adapter->ring_header; if (netif_running(adapter->netdev)) atl1_down(adapter); rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : rfdr->count; rfdr->count = (rfdr->count + 3) & ~3; rrdr->count = rfdr->count; tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : tpdr->count; tpdr->count = (tpdr->count + 3) & ~3; if (netif_running(adapter->netdev)) { /* try to get new resources before deleting old */ err = atl1_setup_ring_resources(adapter); if (err) goto err_setup_ring; /* * save the new, restore the old in order to free it, * then restore the new back again */ rfd_new = adapter->rfd_ring; rrd_new = adapter->rrd_ring; tpd_new = adapter->tpd_ring; rhdr_new = adapter->ring_header; adapter->rfd_ring = rfd_old; adapter->rrd_ring = rrd_old; adapter->tpd_ring = tpd_old; adapter->ring_header = rhdr_old; /* * Save SMB and CMB, since atl1_free_ring_resources * will clear them. 
*/ smb = adapter->smb; cmb = adapter->cmb; atl1_free_ring_resources(adapter); adapter->rfd_ring = rfd_new; adapter->rrd_ring = rrd_new; adapter->tpd_ring = tpd_new; adapter->ring_header = rhdr_new; adapter->smb = smb; adapter->cmb = cmb; err = atl1_up(adapter); if (err) return err; } return 0; err_setup_ring: adapter->rfd_ring = rfd_old; adapter->rrd_ring = rrd_old; adapter->tpd_ring = tpd_old; adapter->ring_header = rhdr_old; atl1_up(adapter); return err; } static void atl1_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *epause) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) { epause->autoneg = AUTONEG_ENABLE; } else { epause->autoneg = AUTONEG_DISABLE; } epause->rx_pause = 1; epause->tx_pause = 1; } static int atl1_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *epause) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) { epause->autoneg = AUTONEG_ENABLE; } else { epause->autoneg = AUTONEG_DISABLE; } epause->rx_pause = 1; epause->tx_pause = 1; return 0; } static void atl1_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { memcpy(p, atl1_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } break; } } static int atl1_nway_reset(struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; if (netif_running(netdev)) { u16 phy_data; atl1_down(adapter); if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) { phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; } else { switch (hw->media_type) { case MEDIA_TYPE_100M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_100M_HALF: phy_data = MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_10M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; break; default: /* MEDIA_TYPE_10M_HALF */ phy_data = MII_CR_SPEED_10 | MII_CR_RESET; } } atl1_write_phy_reg(hw, MII_BMCR, phy_data); atl1_up(adapter); } return 0; } static const struct ethtool_ops atl1_ethtool_ops = { .get_drvinfo = atl1_get_drvinfo, .get_wol = atl1_get_wol, .set_wol = atl1_set_wol, .get_msglevel = atl1_get_msglevel, .set_msglevel = atl1_set_msglevel, .get_regs_len = atl1_get_regs_len, .get_regs = atl1_get_regs, .get_ringparam = atl1_get_ringparam, .set_ringparam = atl1_set_ringparam, .get_pauseparam = atl1_get_pauseparam, .set_pauseparam = atl1_set_pauseparam, .get_link = ethtool_op_get_link, .get_strings = atl1_get_strings, .nway_reset = atl1_nway_reset, .get_ethtool_stats = atl1_get_ethtool_stats, .get_sset_count = atl1_get_sset_count, .get_link_ksettings = atl1_get_link_ksettings, .set_link_ksettings = atl1_set_link_ksettings, }; module_pci_driver(atl1_driver);
linux-master
drivers/net/ethernet/atheros/atlx/atl1.c
// SPDX-License-Identifier: GPL-2.0-or-later /* atlx.c -- common functions for Attansic network drivers * * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <[email protected]> * Copyright(c) 2006 - 2008 Jay Cliburn <[email protected]> * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. */ /* Including this file like a header is a temporary hack, I promise. -- CHS */ #ifndef ATLX_C #define ATLX_C #include <linux/device.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/if.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/workqueue.h> #include "atlx.h" static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data); static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); static void atlx_set_mac_addr(struct atl1_hw *hw); static struct atlx_spi_flash_dev flash_table[] = { /* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */ {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62}, {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60}, {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7}, }; static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return atlx_mii_ioctl(netdev, ifr, cmd); default: return -EOPNOTSUPP; } } /** * atlx_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure */ static int atlx_set_mac(struct net_device *netdev, void *p) { struct atlx_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (netif_running(netdev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atlx_set_mac_addr(&adapter->hw); return 0; } static void atlx_check_for_link(struct atlx_adapter *adapter) { struct net_device *netdev = adapter->netdev; u16 phy_data = 0; spin_lock(&adapter->lock); adapter->phy_timer_pending = false; atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); spin_unlock(&adapter->lock); /* notify upper layer link down ASAP */ if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ if (netif_carrier_ok(netdev)) { /* old link state: Up */ dev_info(&adapter->pdev->dev, "%s link is down\n", netdev->name); adapter->link_speed = SPEED_0; netif_carrier_off(netdev); } } schedule_work(&adapter->link_chg_task); } /** * atlx_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * * The set_multi entry point is called whenever the multicast address * list or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper multicast, * promiscuous mode, and all-multi behavior. 
*/ static void atlx_set_multi(struct net_device *netdev) { struct atlx_adapter *adapter = netdev_priv(netdev); struct atlx_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u32 rctl; u32 hash_value; /* Check for Promiscuous and All Multicast modes */ rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); if (netdev->flags & IFF_PROMISC) rctl |= MAC_CTRL_PROMIS_EN; else if (netdev->flags & IFF_ALLMULTI) { rctl |= MAC_CTRL_MC_ALL_EN; rctl &= ~MAC_CTRL_PROMIS_EN; } else rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); /* clear the old settings from the multicast hash table */ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); /* compute mc addresses' hash value ,and put it into hash table */ netdev_for_each_mc_addr(ha, netdev) { hash_value = atlx_hash_mc_addr(hw, ha->addr); atlx_hash_set(hw, hash_value); } } static inline void atlx_imr_set(struct atlx_adapter *adapter, unsigned int imr) { iowrite32(imr, adapter->hw.hw_addr + REG_IMR); ioread32(adapter->hw.hw_addr + REG_IMR); } /** * atlx_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ static void atlx_irq_enable(struct atlx_adapter *adapter) { atlx_imr_set(adapter, IMR_NORMAL_MASK); adapter->int_enabled = true; } /** * atlx_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ static void atlx_irq_disable(struct atlx_adapter *adapter) { adapter->int_enabled = false; atlx_imr_set(adapter, 0); synchronize_irq(adapter->pdev->irq); } static void atlx_clear_phy_int(struct atlx_adapter *adapter) { u16 phy_data; unsigned long flags; spin_lock_irqsave(&adapter->lock, flags); atlx_read_phy_reg(&adapter->hw, 19, &phy_data); spin_unlock_irqrestore(&adapter->lock, flags); } /** * atlx_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure */ static void atlx_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct atlx_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ schedule_work(&adapter->reset_dev_task); } /* * atlx_link_chg_task - deal with link change event Out of interrupt context */ static void atlx_link_chg_task(struct work_struct *work) { struct atlx_adapter *adapter; unsigned long flags; adapter = container_of(work, struct atlx_adapter, link_chg_task); spin_lock_irqsave(&adapter->lock, flags); atlx_check_link(adapter); spin_unlock_irqrestore(&adapter->lock, flags); } static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl) { if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ *ctrl |= MAC_CTRL_RMV_VLAN; } else { /* disable VLAN tag insert/strip */ *ctrl &= ~MAC_CTRL_RMV_VLAN; } } static void atlx_vlan_mode(struct net_device *netdev, netdev_features_t features) { struct atlx_adapter *adapter = netdev_priv(netdev); unsigned long flags; u32 ctrl; spin_lock_irqsave(&adapter->lock, flags); /* atlx_irq_disable(adapter); FIXME: confirm/remove */ ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); __atlx_vlan_mode(features, &ctrl); iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); /* atlx_irq_enable(adapter); FIXME */ spin_unlock_irqrestore(&adapter->lock, flags); } static void atlx_restore_vlan(struct atlx_adapter *adapter) { atlx_vlan_mode(adapter->netdev, adapter->netdev->features); } static netdev_features_t atlx_fix_features(struct net_device *netdev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel * 
enable/disable make sure tx flag is always in same state as rx. */ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } static int atlx_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_CTAG_RX) atlx_vlan_mode(netdev, features); return 0; } #endif /* ATLX_C */
linux-master
drivers/net/ethernet/atheros/atlx/atlx.c
/* * Copyright (c) 2013 Johannes Berg <[email protected]> * * This file is free software: you may copy, redistribute and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, either version 2 of the License, or (at your * option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * This file incorporates work covered by the following copyright and * permission notice: * * Copyright (c) 2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/mdio.h> #include "reg.h" #include "hw.h" static inline bool alx_is_rev_a(u8 rev) { return rev == ALX_REV_A0 || rev == ALX_REV_A1; } static int alx_wait_mdio_idle(struct alx_hw *hw) { u32 val; int i; for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) { val = alx_read_mem32(hw, ALX_MDIO); if (!(val & ALX_MDIO_BUSY)) return 0; udelay(10); } return -ETIMEDOUT; } static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev, u16 reg, u16 *phy_data) { u32 val, clk_sel; int err; *phy_data = 0; /* use slow clock when it's in hibernation status */ clk_sel = hw->link_speed != SPEED_UNKNOWN ? ALX_MDIO_CLK_SEL_25MD4 : ALX_MDIO_CLK_SEL_25MD128; if (ext) { val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | reg << ALX_MDIO_EXTN_REG_SHIFT; alx_write_mem32(hw, ALX_MDIO_EXTN, val); val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START | ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ | clk_sel << ALX_MDIO_CLK_SEL_SHIFT; } else { val = ALX_MDIO_SPRES_PRMBL | clk_sel << ALX_MDIO_CLK_SEL_SHIFT | reg << ALX_MDIO_REG_SHIFT | ALX_MDIO_START | ALX_MDIO_OP_READ; } alx_write_mem32(hw, ALX_MDIO, val); err = alx_wait_mdio_idle(hw); if (err) return err; val = alx_read_mem32(hw, ALX_MDIO); *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA); return 0; } static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev, u16 reg, u16 phy_data) { u32 val, clk_sel; /* use slow clock when it's in hibernation status */ clk_sel = hw->link_speed != SPEED_UNKNOWN ? 
ALX_MDIO_CLK_SEL_25MD4 : ALX_MDIO_CLK_SEL_25MD128; if (ext) { val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | reg << ALX_MDIO_EXTN_REG_SHIFT; alx_write_mem32(hw, ALX_MDIO_EXTN, val); val = ALX_MDIO_SPRES_PRMBL | clk_sel << ALX_MDIO_CLK_SEL_SHIFT | phy_data << ALX_MDIO_DATA_SHIFT | ALX_MDIO_START | ALX_MDIO_MODE_EXT; } else { val = ALX_MDIO_SPRES_PRMBL | clk_sel << ALX_MDIO_CLK_SEL_SHIFT | reg << ALX_MDIO_REG_SHIFT | phy_data << ALX_MDIO_DATA_SHIFT | ALX_MDIO_START; } alx_write_mem32(hw, ALX_MDIO, val); return alx_wait_mdio_idle(hw); } static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) { return alx_read_phy_core(hw, false, 0, reg, phy_data); } static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) { return alx_write_phy_core(hw, false, 0, reg, phy_data); } static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) { return alx_read_phy_core(hw, true, dev, reg, pdata); } static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) { return alx_write_phy_core(hw, true, dev, reg, data); } static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) { int err; err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); if (err) return err; return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata); } static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) { int err; err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); if (err) return err; return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data); } int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) { int err; spin_lock(&hw->mdio_lock); err = __alx_read_phy_reg(hw, reg, phy_data); spin_unlock(&hw->mdio_lock); return err; } int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) { int err; spin_lock(&hw->mdio_lock); err = __alx_write_phy_reg(hw, reg, phy_data); spin_unlock(&hw->mdio_lock); return err; } int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) { int err; spin_lock(&hw->mdio_lock); err = __alx_read_phy_ext(hw, dev, reg, pdata); spin_unlock(&hw->mdio_lock); return err; } int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) { int err; spin_lock(&hw->mdio_lock); err = __alx_write_phy_ext(hw, dev, reg, data); spin_unlock(&hw->mdio_lock); return err; } static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) { int err; spin_lock(&hw->mdio_lock); err = __alx_read_phy_dbg(hw, reg, pdata); spin_unlock(&hw->mdio_lock); return err; } static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) { int err; spin_lock(&hw->mdio_lock); err = __alx_write_phy_dbg(hw, reg, data); spin_unlock(&hw->mdio_lock); return err; } static u16 alx_get_phy_config(struct alx_hw *hw) { u32 val; u16 phy_val; val = alx_read_mem32(hw, ALX_PHY_CTRL); /* phy in reset */ if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0) return ALX_DRV_PHY_UNKNOWN; val = alx_read_mem32(hw, ALX_DRV); val = ALX_GET_FIELD(val, ALX_DRV_PHY); if (ALX_DRV_PHY_UNKNOWN == val) return ALX_DRV_PHY_UNKNOWN; alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val); if (ALX_PHY_INITED == phy_val) return val; return ALX_DRV_PHY_UNKNOWN; } static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val) { u32 read; int i; for (i = 0; i < ALX_SLD_MAX_TO; i++) { read = alx_read_mem32(hw, reg); if ((read & wait) == 0) { if (val) *val = read; return true; } mdelay(1); } return false; } static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr) { u32 mac0, mac1; mac0 = alx_read_mem32(hw, ALX_STAD0); mac1 = alx_read_mem32(hw, ALX_STAD1); /* addr should be big-endian */ 
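/* STAD0 holds MAC bytes 2-5 and STAD1 bytes 0-1; e.g. 00-0B-6A-F6-00-DC is stored as STAD0=6AF600DC, STAD1=000B (see alx_set_macaddr below) */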
put_unaligned(cpu_to_be32(mac0), (__be32 *)(addr + 2)); put_unaligned(cpu_to_be16(mac1), (__be16 *)addr); return is_valid_ether_addr(addr); } int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr) { u32 val; /* try to get it from register first */ if (alx_read_macaddr(hw, addr)) return 0; /* try to load from efuse */ if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val)) return -EIO; alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START); if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL)) return -EIO; if (alx_read_macaddr(hw, addr)) return 0; /* try to load from flash/eeprom (if present) */ val = alx_read_mem32(hw, ALX_EFLD); if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) { if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_STAT | ALX_EFLD_START, &val)) return -EIO; alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START); if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL)) return -EIO; if (alx_read_macaddr(hw, addr)) return 0; } return -EIO; } void alx_set_macaddr(struct alx_hw *hw, const u8 *addr) { u32 val; /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */ val = be32_to_cpu(get_unaligned((__be32 *)(addr + 2))); alx_write_mem32(hw, ALX_STAD0, val); val = be16_to_cpu(get_unaligned((__be16 *)addr)); alx_write_mem32(hw, ALX_STAD1, val); } static void alx_reset_osc(struct alx_hw *hw, u8 rev) { u32 val, val2; /* clear Internal OSC settings, switching OSC by hw itself */ val = alx_read_mem32(hw, ALX_MISC3); alx_write_mem32(hw, ALX_MISC3, (val & ~ALX_MISC3_25M_BY_SW) | ALX_MISC3_25M_NOTO_INTNL); /* 25M clk from chipset may be unstable 1s after de-assert of * PERST, driver need re-calibrate before enter Sleep for WoL */ val = alx_read_mem32(hw, ALX_MISC); if (rev >= ALX_REV_B0) { /* restore over current protection def-val, * this val could be reset by MAC-RST */ ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF); /* a 0->1 change will update the internal val of osc */ val &= ~ALX_MISC_INTNLOSC_OPEN; alx_write_mem32(hw, ALX_MISC, val); alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); /* hw will automatically dis OSC after cab. 
*/ val2 = alx_read_mem32(hw, ALX_MSIC2); val2 &= ~ALX_MSIC2_CALB_START; alx_write_mem32(hw, ALX_MSIC2, val2); alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START); } else { val &= ~ALX_MISC_INTNLOSC_OPEN; /* disable isolate for rev A devices */ if (alx_is_rev_a(rev)) val &= ~ALX_MISC_ISO_EN; alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); alx_write_mem32(hw, ALX_MISC, val); } udelay(20); } static int alx_stop_mac(struct alx_hw *hw) { u32 rxq, txq, val; u16 i; rxq = alx_read_mem32(hw, ALX_RXQ0); alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN); txq = alx_read_mem32(hw, ALX_TXQ0); alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN); udelay(40); hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { val = alx_read_mem32(hw, ALX_MAC_STS); if (!(val & ALX_MAC_STS_IDLE)) return 0; udelay(10); } return -ETIMEDOUT; } int alx_reset_mac(struct alx_hw *hw) { u32 val, pmctrl; int i, ret; u8 rev; bool a_cr; pmctrl = 0; rev = alx_hw_revision(hw); a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw); /* disable all interrupts, RXQ/TXQ */ alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF); alx_write_mem32(hw, ALX_IMR, 0); alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); ret = alx_stop_mac(hw); if (ret) return ret; /* mac reset workaroud */ alx_write_mem32(hw, ALX_RFD_PIDX, 1); /* dis l0s/l1 before mac reset */ if (a_cr) { pmctrl = alx_read_mem32(hw, ALX_PMCTRL); if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) alx_write_mem32(hw, ALX_PMCTRL, pmctrl & ~(ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)); } /* reset whole mac safely */ val = alx_read_mem32(hw, ALX_MASTER); alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS); /* make sure it's real idle */ udelay(10); for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { val = alx_read_mem32(hw, ALX_RFD_PIDX); if (val == 0) break; udelay(10); } for (; i < ALX_DMA_MAC_RST_TO; i++) { val = alx_read_mem32(hw, ALX_MASTER); if ((val & ALX_MASTER_DMA_MAC_RST) == 0) break; udelay(10); } if (i == ALX_DMA_MAC_RST_TO) return -EIO; udelay(10); if (a_cr) { alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS); /* restore l0s / l1 */ if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) alx_write_mem32(hw, ALX_PMCTRL, pmctrl); } alx_reset_osc(hw, rev); /* clear Internal OSC settings, switching OSC by hw itself, * disable isolate for rev A devices */ val = alx_read_mem32(hw, ALX_MISC3); alx_write_mem32(hw, ALX_MISC3, (val & ~ALX_MISC3_25M_BY_SW) | ALX_MISC3_25M_NOTO_INTNL); val = alx_read_mem32(hw, ALX_MISC); val &= ~ALX_MISC_INTNLOSC_OPEN; if (alx_is_rev_a(rev)) val &= ~ALX_MISC_ISO_EN; alx_write_mem32(hw, ALX_MISC, val); udelay(20); /* driver control speed/duplex, hash-alg */ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); val = alx_read_mem32(hw, ALX_SERDES); alx_write_mem32(hw, ALX_SERDES, val | ALX_SERDES_MACCLK_SLWDWN | ALX_SERDES_PHYCLK_SLWDWN); return 0; } void alx_reset_phy(struct alx_hw *hw) { int i; u32 val; u16 phy_val; /* (DSP)reset PHY core */ val = alx_read_mem32(hw, ALX_PHY_CTRL); val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN | ALX_PHY_CTRL_CLS); val |= ALX_PHY_CTRL_RST_ANALOG; val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN); alx_write_mem32(hw, ALX_PHY_CTRL, val); udelay(10); alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT); for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++) udelay(10); /* phy power saving & hib */ alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, 
ALX_LEGCYPS_DEF); alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL, ALX_SYSMODCTRL_IECHOADJ_DEF); alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS, ALX_VDRVBIAS_DEF); /* EEE advertisement */ val = alx_read_mem32(hw, ALX_LPI_CTRL); alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN); alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0); /* phy power saving */ alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF); alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF); alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF); alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF); alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN); /* rtl8139c, 120m issue */ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78, ALX_MIIEXT_NLP78_120M_DEF); alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10, ALX_MIIEXT_S3DIG10_DEF); if (hw->lnk_patch) { /* Turn off half amplitude */ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, &phy_val); alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT); /* Turn off Green feature */ alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, phy_val | ALX_GREENCFG2_BP_GREEN); /* Turn off half Bias */ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, &phy_val); alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS); } /* set phy interrupt mask */ alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN); } #define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO) void alx_reset_pcie(struct alx_hw *hw) { u8 rev = alx_hw_revision(hw); u32 val; u16 val16; /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ pci_read_config_word(hw->pdev, PCI_COMMAND, &val16); if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) { val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(hw->pdev, PCI_COMMAND, val16); } /* clear WoL setting/status */ val = alx_read_mem32(hw, ALX_WOL0); alx_write_mem32(hw, ALX_WOL0, 0); val = alx_read_mem32(hw, ALX_PDLL_TRNS1); alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN); /* mask some pcie error bits */ val = alx_read_mem32(hw, ALX_UE_SVRT); val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR); alx_write_mem32(hw, ALX_UE_SVRT, val); /* wol 25M & pclk */ val = alx_read_mem32(hw, ALX_MASTER); if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) { if ((val & ALX_MASTER_WAKEN_25M) == 0 || (val & ALX_MASTER_PCLKSEL_SRDS) == 0) alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS | ALX_MASTER_WAKEN_25M); } else { if ((val & ALX_MASTER_WAKEN_25M) == 0 || (val & ALX_MASTER_PCLKSEL_SRDS) != 0) alx_write_mem32(hw, ALX_MASTER, (val & ~ALX_MASTER_PCLKSEL_SRDS) | ALX_MASTER_WAKEN_25M); } /* ASPM setting */ alx_enable_aspm(hw, true, true); udelay(10); } void alx_start_mac(struct alx_hw *hw) { u32 mac, txq, rxq; rxq = alx_read_mem32(hw, ALX_RXQ0); alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN); txq = alx_read_mem32(hw, ALX_TXQ0); alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); mac = hw->rx_ctrl; if (hw->duplex == DUPLEX_FULL) mac |= ALX_MAC_CTRL_FULLD; else mac &= ~ALX_MAC_CTRL_FULLD; ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, hw->link_speed == SPEED_1000 ? 
ALX_MAC_CTRL_SPEED_1000 : ALX_MAC_CTRL_SPEED_10_100); mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN; hw->rx_ctrl = mac; alx_write_mem32(hw, ALX_MAC_CTRL, mac); } void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc) { if (fc & ALX_FC_RX) hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN; else hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN; if (fc & ALX_FC_TX) hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN; else hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN; alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); } void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en) { u32 pmctrl; u8 rev = alx_hw_revision(hw); pmctrl = alx_read_mem32(hw, ALX_PMCTRL); ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER, ALX_PMCTRL_LCKDET_TIMER_DEF); pmctrl |= ALX_PMCTRL_RCVR_WT_1US | ALX_PMCTRL_L1_CLKSW_EN | ALX_PMCTRL_L1_SRDSRX_PWD; ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF); ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US); pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN | ALX_PMCTRL_L1_BUFSRX_EN | ALX_PMCTRL_SADLY_EN | ALX_PMCTRL_HOTRST_WTEN| ALX_PMCTRL_L0S_EN | ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN | ALX_PMCTRL_TXL1_AFTER_L0S | ALX_PMCTRL_RXL1_AFTER_L0S); if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN; if (l0s_en) pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN); if (l1_en) pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN); alx_write_mem32(hw, ALX_PMCTRL, pmctrl); } static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg) { u32 cfg = 0; if (ethadv_cfg & ADVERTISED_Autoneg) { cfg |= ALX_DRV_PHY_AUTO; if (ethadv_cfg & ADVERTISED_10baseT_Half) cfg |= ALX_DRV_PHY_10; if (ethadv_cfg & ADVERTISED_10baseT_Full) cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; if (ethadv_cfg & ADVERTISED_100baseT_Half) cfg |= ALX_DRV_PHY_100; if (ethadv_cfg & ADVERTISED_100baseT_Full) cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; if (ethadv_cfg & ADVERTISED_1000baseT_Half) cfg |= ALX_DRV_PHY_1000; if (ethadv_cfg & ADVERTISED_1000baseT_Full) cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; if (ethadv_cfg & ADVERTISED_Pause) cfg |= ADVERTISE_PAUSE_CAP; if (ethadv_cfg & ADVERTISED_Asym_Pause) cfg |= ADVERTISE_PAUSE_ASYM; } else { switch (ethadv_cfg) { case ADVERTISED_10baseT_Half: cfg |= ALX_DRV_PHY_10; break; case ADVERTISED_100baseT_Half: cfg |= ALX_DRV_PHY_100; break; case ADVERTISED_10baseT_Full: cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; break; case ADVERTISED_100baseT_Full: cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; break; } } return cfg; } int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl) { u16 adv, giga, cr; u32 val; int err = 0; alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0); val = alx_read_mem32(hw, ALX_DRV); ALX_SET_FIELD(val, ALX_DRV_PHY, 0); if (ethadv & ADVERTISED_Autoneg) { adv = ADVERTISE_CSMA; adv |= ethtool_adv_to_mii_adv_t(ethadv); if (flowctrl & ALX_FC_ANEG) { if (flowctrl & ALX_FC_RX) { adv |= ADVERTISED_Pause; if (!(flowctrl & ALX_FC_TX)) adv |= ADVERTISED_Asym_Pause; } else if (flowctrl & ALX_FC_TX) { adv |= ADVERTISED_Asym_Pause; } } giga = 0; if (alx_hw_giga(hw)) giga = ethtool_adv_to_mii_ctrl1000_t(ethadv); cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) || alx_write_phy_reg(hw, MII_CTRL1000, giga) || alx_write_phy_reg(hw, MII_BMCR, cr)) err = -EBUSY; } else { cr = BMCR_RESET; if (ethadv == ADVERTISED_100baseT_Half || ethadv == ADVERTISED_100baseT_Full) cr |= BMCR_SPEED100; if (ethadv == ADVERTISED_10baseT_Full || ethadv == ADVERTISED_100baseT_Full) cr |= 
BMCR_FULLDPLX; err = alx_write_phy_reg(hw, MII_BMCR, cr); } if (!err) { alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED); val |= ethadv_to_hw_cfg(hw, ethadv); } alx_write_mem32(hw, ALX_DRV, val); return err; } void alx_post_phy_link(struct alx_hw *hw) { u16 phy_val, len, agc; u8 revid = alx_hw_revision(hw); bool adj_th = revid == ALX_REV_B0; if (revid != ALX_REV_B0 && !alx_is_rev_a(revid)) return; /* 1000BT/AZ, wrong cable length */ if (hw->link_speed != SPEED_UNKNOWN) { alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, &phy_val); len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN); alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val); agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA); if ((hw->link_speed == SPEED_1000 && (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G || (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) || (hw->link_speed == SPEED_100 && (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M || (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) { alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, ALX_AZ_ANADECT_LONG); alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, &phy_val); alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, phy_val | ALX_AFE_10BT_100M_TH); } else { alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, ALX_AZ_ANADECT_DEF); alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, &phy_val); alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, phy_val & ~ALX_AFE_10BT_100M_TH); } /* threshold adjust */ if (adj_th && hw->lnk_patch) { if (hw->link_speed == SPEED_100) { alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, ALX_MSE16DB_UP); } else if (hw->link_speed == SPEED_1000) { /* * Giga link threshold, raise the tolerance of * noise 50% */ alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val); ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, ALX_MSE20DB_TH_HI); alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val); } } } else { alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, &phy_val); alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, phy_val & ~ALX_AFE_10BT_100M_TH); if (adj_th && hw->lnk_patch) { alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, ALX_MSE16DB_DOWN); alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val); ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, ALX_MSE20DB_TH_DEF); alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val); } } } bool alx_phy_configured(struct alx_hw *hw) { u32 cfg, hw_cfg; cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg); cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY); hw_cfg = alx_get_phy_config(hw); if (hw_cfg == ALX_DRV_PHY_UNKNOWN) return false; return cfg == hw_cfg; } int alx_read_phy_link(struct alx_hw *hw) { struct pci_dev *pdev = hw->pdev; u16 bmsr, giga; int err; err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); if (err) return err; err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); if (err) return err; if (!(bmsr & BMSR_LSTATUS)) { hw->link_speed = SPEED_UNKNOWN; hw->duplex = DUPLEX_UNKNOWN; return 0; } /* speed/duplex result is saved in PHY Specific Status Register */ err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga); if (err) return err; if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED)) goto wrong_speed; switch (giga & ALX_GIGA_PSSR_SPEED) { case ALX_GIGA_PSSR_1000MBS: hw->link_speed = SPEED_1000; break; case ALX_GIGA_PSSR_100MBS: hw->link_speed = SPEED_100; break; case ALX_GIGA_PSSR_10MBS: hw->link_speed = SPEED_10; break; default: goto wrong_speed; } hw->duplex = (giga & ALX_GIGA_PSSR_DPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; return 0; wrong_speed: dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga); return -EINVAL; } int alx_clear_phy_intr(struct alx_hw *hw) { u16 isr; /* clear interrupt status by reading it */ return alx_read_phy_reg(hw, ALX_MII_ISR, &isr); } void alx_disable_rss(struct alx_hw *hw) { u32 ctrl = alx_read_mem32(hw, ALX_RXQ0); ctrl &= ~ALX_RXQ0_RSS_HASH_EN; alx_write_mem32(hw, ALX_RXQ0, ctrl); } void alx_configure_basic(struct alx_hw *hw) { u32 val, raw_mtu, max_payload; u16 val16; u8 chip_rev = alx_hw_revision(hw); alx_set_macaddr(hw, hw->mac_addr); alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL); /* idle timeout to switch clk_125M */ if (chip_rev >= ALX_REV_B0) alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER, ALX_IDLE_DECISN_TIMER_DEF); alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL); val = alx_read_mem32(hw, ALX_MASTER); val |= ALX_MASTER_IRQMOD2_EN | ALX_MASTER_IRQMOD1_EN | ALX_MASTER_SYSALVTIMER_EN; alx_write_mem32(hw, ALX_MASTER, val); alx_write_mem32(hw, ALX_IRQ_MODU_TIMER, (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT); /* intr re-trig timeout */ alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO); /* tpd threshold to trig int */ alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd); alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt); raw_mtu = ALX_RAW_MTU(hw->mtu); alx_write_mem32(hw, ALX_MTU, raw_mtu); if (raw_mtu > (ALX_MTU_JUMBO_TH + ETH_FCS_LEN + VLAN_HLEN)) hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE; if (raw_mtu < ALX_TXQ1_JUMBO_TSO_TH) val = (raw_mtu + 7) >> 3; else val = ALX_TXQ1_JUMBO_TSO_TH >> 3; alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN); max_payload = pcie_get_readrq(hw->pdev) >> 8; /* * if BIOS had changed the default dma read max length, * restore it to default value */ if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN) pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN); val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT | ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN | ALX_TXQ0_SUPT_IPOPT | ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT; alx_write_mem32(hw, ALX_TXQ0, val); val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT | ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT | ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT | ALX_HQTPD_BURST_EN; alx_write_mem32(hw, ALX_HQTPD, val); /* rxq, flow control */ val = alx_read_mem32(hw, ALX_SRAM5); val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3; if (val > ALX_SRAM_RXF_LEN_8K) { val16 = ALX_MTU_STD_ALGN >> 3; val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3; } else { val16 = ALX_MTU_STD_ALGN >> 3; val = (val - ALX_MTU_STD_ALGN) >> 3; } alx_write_mem32(hw, ALX_RXQ2, val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT | val << ALX_RXQ2_RXF_XON_THRESH_SHIFT); val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT | ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT | ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT | ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN | ALX_RXQ0_IPV6_PARSE_EN; if (alx_hw_giga(hw)) ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH, ALX_RXQ0_ASPM_THRESH_100M); alx_write_mem32(hw, ALX_RXQ0, val); val = alx_read_mem32(hw, ALX_DMA); val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT | ALX_DMA_RREQ_PRI_DATA | max_payload << ALX_DMA_RREQ_BLEN_SHIFT | ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT | ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT | (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT; alx_write_mem32(hw, ALX_DMA, val); /* default multi-tx-q weights */ val = ALX_WRR_PRI_RESTRICT_NONE << 
ALX_WRR_PRI_SHIFT | 4 << ALX_WRR_PRI0_SHIFT | 4 << ALX_WRR_PRI1_SHIFT | 4 << ALX_WRR_PRI2_SHIFT | 4 << ALX_WRR_PRI3_SHIFT; alx_write_mem32(hw, ALX_WRR, val); } void alx_mask_msix(struct alx_hw *hw, int index, bool mask) { u32 reg, val; reg = ALX_MSIX_ENTRY_BASE + index * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL; val = mask ? PCI_MSIX_ENTRY_CTRL_MASKBIT : 0; alx_write_mem32(hw, reg, val); alx_post_write(hw); } bool alx_get_phy_info(struct alx_hw *hw) { u16 devs1, devs2; if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) || alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1])) return false; /* since we haven't PMA/PMD status2 register, we can't * use mdio45_probe function for prtad and mmds. * use fixed MMD3 to get mmds. */ if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) || alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2)) return false; hw->mdio.mmds = devs1 | devs2 << 16; return true; } void alx_update_hw_stats(struct alx_hw *hw) { /* RX stats */ hw->stats.rx_ok += alx_read_mem32(hw, ALX_MIB_RX_OK); hw->stats.rx_bcast += alx_read_mem32(hw, ALX_MIB_RX_BCAST); hw->stats.rx_mcast += alx_read_mem32(hw, ALX_MIB_RX_MCAST); hw->stats.rx_pause += alx_read_mem32(hw, ALX_MIB_RX_PAUSE); hw->stats.rx_ctrl += alx_read_mem32(hw, ALX_MIB_RX_CTRL); hw->stats.rx_fcs_err += alx_read_mem32(hw, ALX_MIB_RX_FCS_ERR); hw->stats.rx_len_err += alx_read_mem32(hw, ALX_MIB_RX_LEN_ERR); hw->stats.rx_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BYTE_CNT); hw->stats.rx_runt += alx_read_mem32(hw, ALX_MIB_RX_RUNT); hw->stats.rx_frag += alx_read_mem32(hw, ALX_MIB_RX_FRAG); hw->stats.rx_sz_64B += alx_read_mem32(hw, ALX_MIB_RX_SZ_64B); hw->stats.rx_sz_127B += alx_read_mem32(hw, ALX_MIB_RX_SZ_127B); hw->stats.rx_sz_255B += alx_read_mem32(hw, ALX_MIB_RX_SZ_255B); hw->stats.rx_sz_511B += alx_read_mem32(hw, ALX_MIB_RX_SZ_511B); hw->stats.rx_sz_1023B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1023B); hw->stats.rx_sz_1518B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1518B); hw->stats.rx_sz_max += alx_read_mem32(hw, ALX_MIB_RX_SZ_MAX); hw->stats.rx_ov_sz += alx_read_mem32(hw, ALX_MIB_RX_OV_SZ); hw->stats.rx_ov_rxf += alx_read_mem32(hw, ALX_MIB_RX_OV_RXF); hw->stats.rx_ov_rrd += alx_read_mem32(hw, ALX_MIB_RX_OV_RRD); hw->stats.rx_align_err += alx_read_mem32(hw, ALX_MIB_RX_ALIGN_ERR); hw->stats.rx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BCCNT); hw->stats.rx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_MCCNT); hw->stats.rx_err_addr += alx_read_mem32(hw, ALX_MIB_RX_ERRADDR); /* TX stats */ hw->stats.tx_ok += alx_read_mem32(hw, ALX_MIB_TX_OK); hw->stats.tx_bcast += alx_read_mem32(hw, ALX_MIB_TX_BCAST); hw->stats.tx_mcast += alx_read_mem32(hw, ALX_MIB_TX_MCAST); hw->stats.tx_pause += alx_read_mem32(hw, ALX_MIB_TX_PAUSE); hw->stats.tx_exc_defer += alx_read_mem32(hw, ALX_MIB_TX_EXC_DEFER); hw->stats.tx_ctrl += alx_read_mem32(hw, ALX_MIB_TX_CTRL); hw->stats.tx_defer += alx_read_mem32(hw, ALX_MIB_TX_DEFER); hw->stats.tx_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BYTE_CNT); hw->stats.tx_sz_64B += alx_read_mem32(hw, ALX_MIB_TX_SZ_64B); hw->stats.tx_sz_127B += alx_read_mem32(hw, ALX_MIB_TX_SZ_127B); hw->stats.tx_sz_255B += alx_read_mem32(hw, ALX_MIB_TX_SZ_255B); hw->stats.tx_sz_511B += alx_read_mem32(hw, ALX_MIB_TX_SZ_511B); hw->stats.tx_sz_1023B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1023B); hw->stats.tx_sz_1518B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1518B); hw->stats.tx_sz_max += alx_read_mem32(hw, ALX_MIB_TX_SZ_MAX); hw->stats.tx_single_col += alx_read_mem32(hw, ALX_MIB_TX_SINGLE_COL); hw->stats.tx_multi_col += alx_read_mem32(hw, ALX_MIB_TX_MULTI_COL); 
hw->stats.tx_late_col += alx_read_mem32(hw, ALX_MIB_TX_LATE_COL); hw->stats.tx_abort_col += alx_read_mem32(hw, ALX_MIB_TX_ABORT_COL); hw->stats.tx_underrun += alx_read_mem32(hw, ALX_MIB_TX_UNDERRUN); hw->stats.tx_trd_eop += alx_read_mem32(hw, ALX_MIB_TX_TRD_EOP); hw->stats.tx_len_err += alx_read_mem32(hw, ALX_MIB_TX_LEN_ERR); hw->stats.tx_trunc += alx_read_mem32(hw, ALX_MIB_TX_TRUNC); hw->stats.tx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BCCNT); hw->stats.tx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_MCCNT); hw->stats.update += alx_read_mem32(hw, ALX_MIB_UPDATE); }
linux-master
drivers/net/ethernet/atheros/alx/hw.c
/* * Copyright (c) 2013 Johannes Berg <[email protected]> * * This file is free software: you may copy, redistribute and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, either version 2 of the License, or (at your * option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * This file incorporates work covered by the following copyright and * permission notice: * * Copyright (c) 2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/pci.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mdio.h> #include <linux/interrupt.h> #include <asm/byteorder.h> #include "alx.h" #include "reg.h" #include "hw.h" /* The order of these strings must match the order of the fields in * struct alx_hw_stats * See hw.h */ static const char alx_gstrings_stats[][ETH_GSTRING_LEN] = { "rx_packets", "rx_bcast_packets", "rx_mcast_packets", "rx_pause_packets", "rx_ctrl_packets", "rx_fcs_errors", "rx_length_errors", "rx_bytes", "rx_runt_packets", "rx_fragments", "rx_64B_or_less_packets", "rx_65B_to_127B_packets", "rx_128B_to_255B_packets", "rx_256B_to_511B_packets", "rx_512B_to_1023B_packets", "rx_1024B_to_1518B_packets", "rx_1519B_to_mtu_packets", "rx_oversize_packets", "rx_rxf_ov_drop_packets", "rx_rrd_ov_drop_packets", "rx_align_errors", "rx_bcast_bytes", "rx_mcast_bytes", "rx_address_errors", "tx_packets", "tx_bcast_packets", "tx_mcast_packets", "tx_pause_packets", "tx_exc_defer_packets", "tx_ctrl_packets", "tx_defer_packets", "tx_bytes", "tx_64B_or_less_packets", "tx_65B_to_127B_packets", "tx_128B_to_255B_packets", "tx_256B_to_511B_packets", "tx_512B_to_1023B_packets", "tx_1024B_to_1518B_packets", "tx_1519B_to_mtu_packets", "tx_single_collision", "tx_multiple_collisions", "tx_late_collision", "tx_abort_collision", "tx_underrun", "tx_trd_eop", "tx_length_errors", "tx_trunc_packets", "tx_bcast_bytes", "tx_mcast_bytes", "tx_update", }; #define ALX_NUM_STATS ARRAY_SIZE(alx_gstrings_stats) static u32 alx_get_supported_speeds(struct alx_hw *hw) { u32 supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; if (alx_hw_giga(hw)) supported |= SUPPORTED_1000baseT_Full; BUILD_BUG_ON(SUPPORTED_10baseT_Half != ADVERTISED_10baseT_Half); BUILD_BUG_ON(SUPPORTED_10baseT_Full != ADVERTISED_10baseT_Full); BUILD_BUG_ON(SUPPORTED_100baseT_Half != ADVERTISED_100baseT_Half); 
BUILD_BUG_ON(SUPPORTED_100baseT_Full != ADVERTISED_100baseT_Full); BUILD_BUG_ON(SUPPORTED_1000baseT_Full != ADVERTISED_1000baseT_Full); return supported; } static int alx_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; u32 supported, advertising; supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_Pause | SUPPORTED_Asym_Pause; if (alx_hw_giga(hw)) supported |= SUPPORTED_1000baseT_Full; supported |= alx_get_supported_speeds(hw); advertising = ADVERTISED_TP; if (hw->adv_cfg & ADVERTISED_Autoneg) advertising |= hw->adv_cfg; cmd->base.port = PORT_TP; cmd->base.phy_address = 0; if (hw->adv_cfg & ADVERTISED_Autoneg) cmd->base.autoneg = AUTONEG_ENABLE; else cmd->base.autoneg = AUTONEG_DISABLE; if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) { if (hw->flowctrl & ALX_FC_RX) { advertising |= ADVERTISED_Pause; if (!(hw->flowctrl & ALX_FC_TX)) advertising |= ADVERTISED_Asym_Pause; } else if (hw->flowctrl & ALX_FC_TX) { advertising |= ADVERTISED_Asym_Pause; } } mutex_lock(&alx->mtx); cmd->base.speed = hw->link_speed; cmd->base.duplex = hw->duplex; mutex_unlock(&alx->mtx); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int alx_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; u32 adv_cfg; u32 advertising; int ret; ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); if (cmd->base.autoneg == AUTONEG_ENABLE) { if (advertising & ~alx_get_supported_speeds(hw)) return -EINVAL; adv_cfg = advertising | ADVERTISED_Autoneg; } else { adv_cfg = alx_speed_to_ethadv(cmd->base.speed, cmd->base.duplex); if (!adv_cfg || adv_cfg == ADVERTISED_1000baseT_Full) return -EINVAL; } hw->adv_cfg = adv_cfg; mutex_lock(&alx->mtx); ret = alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl); mutex_unlock(&alx->mtx); return ret; } static void alx_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; mutex_lock(&alx->mtx); pause->autoneg = !!(hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg); pause->tx_pause = !!(hw->flowctrl & ALX_FC_TX); pause->rx_pause = !!(hw->flowctrl & ALX_FC_RX); mutex_unlock(&alx->mtx); } static int alx_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; int err = 0; bool reconfig_phy = false; u8 fc = 0; if (pause->tx_pause) fc |= ALX_FC_TX; if (pause->rx_pause) fc |= ALX_FC_RX; if (pause->autoneg) fc |= ALX_FC_ANEG; mutex_lock(&alx->mtx); /* restart auto-neg for auto-mode */ if (hw->adv_cfg & ADVERTISED_Autoneg) { if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG)) reconfig_phy = true; if (fc & hw->flowctrl & ALX_FC_ANEG && (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) reconfig_phy = true; } if (reconfig_phy) { err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc); if (err) { mutex_unlock(&alx->mtx); return err; } } /* flow control on mac */ if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) alx_cfg_mac_flowcontrol(hw, fc); hw->flowctrl = fc; mutex_unlock(&alx->mtx); return 0; } static u32 alx_get_msglevel(struct net_device *netdev) { struct alx_priv *alx = netdev_priv(netdev); return alx->msg_enable; } static 
void alx_set_msglevel(struct net_device *netdev, u32 data) { struct alx_priv *alx = netdev_priv(netdev); alx->msg_enable = data; } static void alx_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *estats, u64 *data) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; spin_lock(&alx->stats_lock); alx_update_hw_stats(hw); BUILD_BUG_ON(sizeof(hw->stats) != ALX_NUM_STATS * sizeof(u64)); memcpy(data, &hw->stats, sizeof(hw->stats)); spin_unlock(&alx->stats_lock); } static void alx_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) { switch (stringset) { case ETH_SS_STATS: memcpy(buf, &alx_gstrings_stats, sizeof(alx_gstrings_stats)); break; default: WARN_ON(1); break; } } static int alx_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: return ALX_NUM_STATS; default: return -EINVAL; } } const struct ethtool_ops alx_ethtool_ops = { .get_pauseparam = alx_get_pauseparam, .set_pauseparam = alx_set_pauseparam, .get_msglevel = alx_get_msglevel, .set_msglevel = alx_set_msglevel, .get_link = ethtool_op_get_link, .get_strings = alx_get_strings, .get_sset_count = alx_get_sset_count, .get_ethtool_stats = alx_get_ethtool_stats, .get_link_ksettings = alx_get_link_ksettings, .set_link_ksettings = alx_set_link_ksettings, };
linux-master
drivers/net/ethernet/atheros/alx/ethtool.c
/* * Copyright (c) 2013, 2021 Johannes Berg <[email protected]> * * This file is free software: you may copy, redistribute and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, either version 2 of the License, or (at your * option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * This file incorporates work covered by the following copyright and * permission notice: * * Copyright (c) 2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/if_vlan.h> #include <linux/mdio.h> #include <linux/bitops.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <net/ip6_checksum.h> #include <linux/crc32.h> #include "alx.h" #include "hw.h" #include "reg.h" static const char alx_drv_name[] = "alx"; static void alx_free_txbuf(struct alx_tx_queue *txq, int entry) { struct alx_buffer *txb = &txq->bufs[entry]; if (dma_unmap_len(txb, size)) { dma_unmap_single(txq->dev, dma_unmap_addr(txb, dma), dma_unmap_len(txb, size), DMA_TO_DEVICE); dma_unmap_len_set(txb, size, 0); } if (txb->skb) { dev_kfree_skb_any(txb->skb); txb->skb = NULL; } } static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) { struct alx_rx_queue *rxq = alx->qnapi[0]->rxq; struct sk_buff *skb; struct alx_buffer *cur_buf; dma_addr_t dma; u16 cur, next, count = 0; next = cur = rxq->write_idx; if (++next == alx->rx_ringsz) next = 0; cur_buf = &rxq->bufs[cur]; while (!cur_buf->skb && next != rxq->read_idx) { struct alx_rfd *rfd = &rxq->rfd[cur]; /* * When DMA RX address is set to something like * 0x....fc0, it will be very likely to cause DMA * RFD overflow issue. * * To work around it, we apply rx skb with 64 bytes * longer space, and offset the address whenever * 0x....fc0 is detected. */ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp); if (!skb) break; if (((unsigned long)skb->data & 0xfff) == 0xfc0) skb_reserve(skb, 64); dma = dma_map_single(&alx->hw.pdev->dev, skb->data, alx->rxbuf_size, DMA_FROM_DEVICE); if (dma_mapping_error(&alx->hw.pdev->dev, dma)) { dev_kfree_skb(skb); break; } /* Unfortunately, RX descriptor buffers must be 4-byte * aligned, so we can't use IP alignment. 
*/ if (WARN_ON(dma & 3)) { dev_kfree_skb(skb); break; } cur_buf->skb = skb; dma_unmap_len_set(cur_buf, size, alx->rxbuf_size); dma_unmap_addr_set(cur_buf, dma, dma); rfd->addr = cpu_to_le64(dma); cur = next; if (++next == alx->rx_ringsz) next = 0; cur_buf = &rxq->bufs[cur]; count++; } if (count) { /* flush all updates before updating hardware */ wmb(); rxq->write_idx = cur; alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); } return count; } static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx, struct sk_buff *skb) { unsigned int r_idx = skb->queue_mapping; if (r_idx >= alx->num_txq) r_idx = r_idx % alx->num_txq; return alx->qnapi[r_idx]->txq; } static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq) { return netdev_get_tx_queue(txq->netdev, txq->queue_idx); } static inline int alx_tpd_avail(struct alx_tx_queue *txq) { if (txq->write_idx >= txq->read_idx) return txq->count + txq->read_idx - txq->write_idx - 1; return txq->read_idx - txq->write_idx - 1; } static bool alx_clean_tx_irq(struct alx_tx_queue *txq) { struct alx_priv *alx; struct netdev_queue *tx_queue; u16 hw_read_idx, sw_read_idx; unsigned int total_bytes = 0, total_packets = 0; int budget = ALX_DEFAULT_TX_WORK; alx = netdev_priv(txq->netdev); tx_queue = alx_get_tx_queue(txq); sw_read_idx = txq->read_idx; hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg); if (sw_read_idx != hw_read_idx) { while (sw_read_idx != hw_read_idx && budget > 0) { struct sk_buff *skb; skb = txq->bufs[sw_read_idx].skb; if (skb) { total_bytes += skb->len; total_packets++; budget--; } alx_free_txbuf(txq, sw_read_idx); if (++sw_read_idx == txq->count) sw_read_idx = 0; } txq->read_idx = sw_read_idx; netdev_tx_completed_queue(tx_queue, total_packets, total_bytes); } if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) && alx_tpd_avail(txq) > txq->count / 4) netif_tx_wake_queue(tx_queue); return sw_read_idx == hw_read_idx; } static void alx_schedule_link_check(struct alx_priv *alx) { schedule_work(&alx->link_check_wk); } static void alx_schedule_reset(struct alx_priv *alx) { schedule_work(&alx->reset_wk); } static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget) { struct alx_priv *alx; struct alx_rrd *rrd; struct alx_buffer *rxb; struct sk_buff *skb; u16 length, rfd_cleaned = 0; int work = 0; alx = netdev_priv(rxq->netdev); while (work < budget) { rrd = &rxq->rrd[rxq->rrd_read_idx]; if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) break; rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT); if (ALX_GET_FIELD(le32_to_cpu(rrd->word0), RRD_SI) != rxq->read_idx || ALX_GET_FIELD(le32_to_cpu(rrd->word0), RRD_NOR) != 1) { alx_schedule_reset(alx); return work; } rxb = &rxq->bufs[rxq->read_idx]; dma_unmap_single(rxq->dev, dma_unmap_addr(rxb, dma), dma_unmap_len(rxb, size), DMA_FROM_DEVICE); dma_unmap_len_set(rxb, size, 0); skb = rxb->skb; rxb->skb = NULL; if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) || rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) { rrd->word3 = 0; dev_kfree_skb_any(skb); goto next_pkt; } length = ALX_GET_FIELD(le32_to_cpu(rrd->word3), RRD_PKTLEN) - ETH_FCS_LEN; skb_put(skb, length); skb->protocol = eth_type_trans(skb, rxq->netdev); skb_checksum_none_assert(skb); if (alx->dev->features & NETIF_F_RXCSUM && !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) | cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) { switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2), RRD_PID)) { case RRD_PID_IPV6UDP: case RRD_PID_IPV4UDP: case RRD_PID_IPV4TCP: case RRD_PID_IPV6TCP: skb->ip_summed = 
CHECKSUM_UNNECESSARY; break; } } napi_gro_receive(&rxq->np->napi, skb); work++; next_pkt: if (++rxq->read_idx == rxq->count) rxq->read_idx = 0; if (++rxq->rrd_read_idx == rxq->count) rxq->rrd_read_idx = 0; if (++rfd_cleaned > ALX_RX_ALLOC_THRESH) rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC); } if (rfd_cleaned) alx_refill_rx_ring(alx, GFP_ATOMIC); return work; } static int alx_poll(struct napi_struct *napi, int budget) { struct alx_napi *np = container_of(napi, struct alx_napi, napi); struct alx_priv *alx = np->alx; struct alx_hw *hw = &alx->hw; unsigned long flags; bool tx_complete = true; int work = 0; if (np->txq) tx_complete = alx_clean_tx_irq(np->txq); if (np->rxq) work = alx_clean_rx_irq(np->rxq, budget); if (!tx_complete || work == budget) return budget; napi_complete_done(&np->napi, work); /* enable interrupt */ if (alx->hw.pdev->msix_enabled) { alx_mask_msix(hw, np->vec_idx, false); } else { spin_lock_irqsave(&alx->irq_lock, flags); alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; alx_write_mem32(hw, ALX_IMR, alx->int_mask); spin_unlock_irqrestore(&alx->irq_lock, flags); } alx_post_write(hw); return work; } static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr) { struct alx_hw *hw = &alx->hw; if (intr & ALX_ISR_FATAL) { netif_warn(alx, hw, alx->dev, "fatal interrupt 0x%x, resetting\n", intr); alx_schedule_reset(alx); return true; } if (intr & ALX_ISR_ALERT) netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr); if (intr & ALX_ISR_PHY) { /* suppress PHY interrupt, because the source * is from PHY internal. only the internal status * is cleared, the interrupt status could be cleared. */ alx->int_mask &= ~ALX_ISR_PHY; alx_write_mem32(hw, ALX_IMR, alx->int_mask); alx_schedule_link_check(alx); } return false; } static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) { struct alx_hw *hw = &alx->hw; spin_lock(&alx->irq_lock); /* ACK interrupt */ alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS); intr &= alx->int_mask; if (alx_intr_handle_misc(alx, intr)) goto out; if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) { napi_schedule(&alx->qnapi[0]->napi); /* mask rx/tx interrupt, enable them when napi complete */ alx->int_mask &= ~ALX_ISR_ALL_QUEUES; alx_write_mem32(hw, ALX_IMR, alx->int_mask); } alx_write_mem32(hw, ALX_ISR, 0); out: spin_unlock(&alx->irq_lock); return IRQ_HANDLED; } static irqreturn_t alx_intr_msix_ring(int irq, void *data) { struct alx_napi *np = data; struct alx_hw *hw = &np->alx->hw; /* mask interrupt to ACK chip */ alx_mask_msix(hw, np->vec_idx, true); /* clear interrupt status */ alx_write_mem32(hw, ALX_ISR, np->vec_mask); napi_schedule(&np->napi); return IRQ_HANDLED; } static irqreturn_t alx_intr_msix_misc(int irq, void *data) { struct alx_priv *alx = data; struct alx_hw *hw = &alx->hw; u32 intr; /* mask interrupt to ACK chip */ alx_mask_msix(hw, 0, true); /* read interrupt status */ intr = alx_read_mem32(hw, ALX_ISR); intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES); if (alx_intr_handle_misc(alx, intr)) return IRQ_HANDLED; /* clear interrupt status */ alx_write_mem32(hw, ALX_ISR, intr); /* enable interrupt again */ alx_mask_msix(hw, 0, false); return IRQ_HANDLED; } static irqreturn_t alx_intr_msi(int irq, void *data) { struct alx_priv *alx = data; return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR)); } static irqreturn_t alx_intr_legacy(int irq, void *data) { struct alx_priv *alx = data; struct alx_hw *hw = &alx->hw; u32 intr; intr = alx_read_mem32(hw, ALX_ISR); if (intr & ALX_ISR_DIS || !(intr & alx->int_mask)) return IRQ_NONE; return 
alx_intr_handle(alx, intr); } static const u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO, ALX_TPD_PRI1_ADDR_LO, ALX_TPD_PRI2_ADDR_LO, ALX_TPD_PRI3_ADDR_LO}; static void alx_init_ring_ptrs(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; u32 addr_hi = ((u64)alx->descmem.dma) >> 32; struct alx_napi *np; int i; for (i = 0; i < alx->num_napi; i++) { np = alx->qnapi[i]; if (np->txq) { np->txq->read_idx = 0; np->txq->write_idx = 0; alx_write_mem32(hw, txring_header_reg[np->txq->queue_idx], np->txq->tpd_dma); } if (np->rxq) { np->rxq->read_idx = 0; np->rxq->write_idx = 0; np->rxq->rrd_read_idx = 0; alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma); alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma); } } alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi); alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz); alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi); alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz); alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz); alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size); /* load these pointers into the chip */ alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR); } static void alx_free_txring_buf(struct alx_tx_queue *txq) { int i; if (!txq->bufs) return; for (i = 0; i < txq->count; i++) alx_free_txbuf(txq, i); memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer)); memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd)); txq->write_idx = 0; txq->read_idx = 0; netdev_tx_reset_queue(alx_get_tx_queue(txq)); } static void alx_free_rxring_buf(struct alx_rx_queue *rxq) { struct alx_buffer *cur_buf; u16 i; if (!rxq->bufs) return; for (i = 0; i < rxq->count; i++) { cur_buf = rxq->bufs + i; if (cur_buf->skb) { dma_unmap_single(rxq->dev, dma_unmap_addr(cur_buf, dma), dma_unmap_len(cur_buf, size), DMA_FROM_DEVICE); dev_kfree_skb(cur_buf->skb); cur_buf->skb = NULL; dma_unmap_len_set(cur_buf, size, 0); dma_unmap_addr_set(cur_buf, dma, 0); } } rxq->write_idx = 0; rxq->read_idx = 0; rxq->rrd_read_idx = 0; } static void alx_free_buffers(struct alx_priv *alx) { int i; for (i = 0; i < alx->num_txq; i++) if (alx->qnapi[i] && alx->qnapi[i]->txq) alx_free_txring_buf(alx->qnapi[i]->txq); if (alx->qnapi[0] && alx->qnapi[0]->rxq) alx_free_rxring_buf(alx->qnapi[0]->rxq); } static int alx_reinit_rings(struct alx_priv *alx) { alx_free_buffers(alx); alx_init_ring_ptrs(alx); if (!alx_refill_rx_ring(alx, GFP_KERNEL)) return -ENOMEM; return 0; } static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash) { u32 crc32, bit, reg; crc32 = ether_crc(ETH_ALEN, addr); reg = (crc32 >> 31) & 0x1; bit = (crc32 >> 26) & 0x1F; mc_hash[reg] |= BIT(bit); } static void __alx_set_rx_mode(struct net_device *netdev) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; struct netdev_hw_addr *ha; u32 mc_hash[2] = {}; if (!(netdev->flags & IFF_ALLMULTI)) { netdev_for_each_mc_addr(ha, netdev) alx_add_mc_addr(hw, ha->addr, mc_hash); alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]); alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]); } hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN); if (netdev->flags & IFF_PROMISC) hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN; if (netdev->flags & IFF_ALLMULTI) hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN; alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); } static void alx_set_rx_mode(struct net_device *netdev) { __alx_set_rx_mode(netdev); } static int alx_set_mac_address(struct net_device *netdev, void *data) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; struct sockaddr *addr = 
data; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (netdev->addr_assign_type & NET_ADDR_RANDOM) netdev->addr_assign_type ^= NET_ADDR_RANDOM; eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); alx_set_macaddr(hw, hw->mac_addr); return 0; } static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq, int offset) { txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL); if (!txq->bufs) return -ENOMEM; txq->tpd = alx->descmem.virt + offset; txq->tpd_dma = alx->descmem.dma + offset; offset += sizeof(struct alx_txd) * txq->count; return offset; } static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq, int offset) { rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL); if (!rxq->bufs) return -ENOMEM; rxq->rrd = alx->descmem.virt + offset; rxq->rrd_dma = alx->descmem.dma + offset; offset += sizeof(struct alx_rrd) * rxq->count; rxq->rfd = alx->descmem.virt + offset; rxq->rfd_dma = alx->descmem.dma + offset; offset += sizeof(struct alx_rfd) * rxq->count; return offset; } static int alx_alloc_rings(struct alx_priv *alx) { int i, offset = 0; /* physical tx/rx ring descriptors * * Allocate them as a single chunk because they must not cross a * 4G boundary (hardware has a single register for high 32 bits * of addresses only) */ alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz * alx->num_txq + sizeof(struct alx_rrd) * alx->rx_ringsz + sizeof(struct alx_rfd) * alx->rx_ringsz; alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev, alx->descmem.size, &alx->descmem.dma, GFP_KERNEL); if (!alx->descmem.virt) return -ENOMEM; /* alignment requirements */ BUILD_BUG_ON(sizeof(struct alx_txd) % 8); BUILD_BUG_ON(sizeof(struct alx_rrd) % 8); for (i = 0; i < alx->num_txq; i++) { offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset); if (offset < 0) { netdev_err(alx->dev, "Allocation of tx buffer failed!\n"); return -ENOMEM; } } offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset); if (offset < 0) { netdev_err(alx->dev, "Allocation of rx buffer failed!\n"); return -ENOMEM; } return 0; } static void alx_free_rings(struct alx_priv *alx) { int i; alx_free_buffers(alx); for (i = 0; i < alx->num_txq; i++) if (alx->qnapi[i] && alx->qnapi[i]->txq) kfree(alx->qnapi[i]->txq->bufs); if (alx->qnapi[0] && alx->qnapi[0]->rxq) kfree(alx->qnapi[0]->rxq->bufs); if (alx->descmem.virt) dma_free_coherent(&alx->hw.pdev->dev, alx->descmem.size, alx->descmem.virt, alx->descmem.dma); } static void alx_free_napis(struct alx_priv *alx) { struct alx_napi *np; int i; for (i = 0; i < alx->num_napi; i++) { np = alx->qnapi[i]; if (!np) continue; netif_napi_del(&np->napi); kfree(np->txq); kfree(np->rxq); kfree(np); alx->qnapi[i] = NULL; } } static const u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX, ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX}; static const u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX, ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX}; static const u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1, ALX_ISR_TX_Q2, ALX_ISR_TX_Q3}; static const u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1, ALX_ISR_RX_Q2, ALX_ISR_RX_Q3, ALX_ISR_RX_Q4, ALX_ISR_RX_Q5, ALX_ISR_RX_Q6, ALX_ISR_RX_Q7}; static int alx_alloc_napis(struct alx_priv *alx) { struct alx_napi *np; struct alx_rx_queue *rxq; struct alx_tx_queue *txq; int i; alx->int_mask &= ~ALX_ISR_ALL_QUEUES; /* allocate alx_napi structures */ for (i = 0; i < alx->num_napi; i++) { np = kzalloc(sizeof(struct alx_napi), 
GFP_KERNEL); if (!np) goto err_out; np->alx = alx; netif_napi_add(alx->dev, &np->napi, alx_poll); alx->qnapi[i] = np; } /* allocate tx queues */ for (i = 0; i < alx->num_txq; i++) { np = alx->qnapi[i]; txq = kzalloc(sizeof(*txq), GFP_KERNEL); if (!txq) goto err_out; np->txq = txq; txq->p_reg = tx_pidx_reg[i]; txq->c_reg = tx_cidx_reg[i]; txq->queue_idx = i; txq->count = alx->tx_ringsz; txq->netdev = alx->dev; txq->dev = &alx->hw.pdev->dev; np->vec_mask |= tx_vect_mask[i]; alx->int_mask |= tx_vect_mask[i]; } /* allocate rx queues */ np = alx->qnapi[0]; rxq = kzalloc(sizeof(*rxq), GFP_KERNEL); if (!rxq) goto err_out; np->rxq = rxq; rxq->np = alx->qnapi[0]; rxq->queue_idx = 0; rxq->count = alx->rx_ringsz; rxq->netdev = alx->dev; rxq->dev = &alx->hw.pdev->dev; np->vec_mask |= rx_vect_mask[0]; alx->int_mask |= rx_vect_mask[0]; return 0; err_out: netdev_err(alx->dev, "error allocating internal structures\n"); alx_free_napis(alx); return -ENOMEM; } static const int txq_vec_mapping_shift[] = { 0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT, 0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT, 1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT, 1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT, }; static void alx_config_vector_mapping(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; u32 tbl[2] = {0, 0}; int i, vector, idx, shift; if (alx->hw.pdev->msix_enabled) { /* tx mappings */ for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) { idx = txq_vec_mapping_shift[i * 2]; shift = txq_vec_mapping_shift[i * 2 + 1]; tbl[idx] |= vector << shift; } /* rx mapping */ tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT; } alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]); alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]); alx_write_mem32(hw, ALX_MSI_ID_MAP, 0); } static int alx_enable_msix(struct alx_priv *alx) { int err, num_vec, num_txq, num_rxq; num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES); num_rxq = 1; num_vec = max_t(int, num_txq, num_rxq) + 1; err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec, PCI_IRQ_MSIX); if (err < 0) { netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n"); return err; } alx->num_vec = num_vec; alx->num_napi = num_vec - 1; alx->num_txq = num_txq; alx->num_rxq = num_rxq; return err; } static int alx_request_msix(struct alx_priv *alx) { struct net_device *netdev = alx->dev; int i, err, vector = 0, free_vector = 0; err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc, 0, netdev->name, alx); if (err) goto out_err; for (i = 0; i < alx->num_napi; i++) { struct alx_napi *np = alx->qnapi[i]; vector++; if (np->txq && np->rxq) sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name, np->txq->queue_idx); else if (np->txq) sprintf(np->irq_lbl, "%s-tx-%u", netdev->name, np->txq->queue_idx); else if (np->rxq) sprintf(np->irq_lbl, "%s-rx-%u", netdev->name, np->rxq->queue_idx); else sprintf(np->irq_lbl, "%s-unused", netdev->name); np->vec_idx = vector; err = request_irq(pci_irq_vector(alx->hw.pdev, vector), alx_intr_msix_ring, 0, np->irq_lbl, np); if (err) goto out_free; } return 0; out_free: free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx); vector--; for (i = 0; i < vector; i++) free_irq(pci_irq_vector(alx->hw.pdev,free_vector++), alx->qnapi[i]); out_err: return err; } static int alx_init_intr(struct alx_priv *alx) { int ret; ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY); if (ret < 0) return ret; alx->num_vec = 1; alx->num_napi = 1; alx->num_txq = 1; alx->num_rxq = 1; return 0; } static void alx_irq_enable(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; int i; /* level-1 interrupt switch */ 
alx_write_mem32(hw, ALX_ISR, 0); alx_write_mem32(hw, ALX_IMR, alx->int_mask); alx_post_write(hw); if (alx->hw.pdev->msix_enabled) { /* enable all msix irqs */ for (i = 0; i < alx->num_vec; i++) alx_mask_msix(hw, i, false); } } static void alx_irq_disable(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; int i; alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); alx_write_mem32(hw, ALX_IMR, 0); alx_post_write(hw); if (alx->hw.pdev->msix_enabled) { for (i = 0; i < alx->num_vec; i++) { alx_mask_msix(hw, i, true); synchronize_irq(pci_irq_vector(alx->hw.pdev, i)); } } else { synchronize_irq(pci_irq_vector(alx->hw.pdev, 0)); } } static int alx_realloc_resources(struct alx_priv *alx) { int err; alx_free_rings(alx); alx_free_napis(alx); pci_free_irq_vectors(alx->hw.pdev); err = alx_init_intr(alx); if (err) return err; err = alx_alloc_napis(alx); if (err) return err; err = alx_alloc_rings(alx); if (err) return err; return 0; } static int alx_request_irq(struct alx_priv *alx) { struct pci_dev *pdev = alx->hw.pdev; struct alx_hw *hw = &alx->hw; int err; u32 msi_ctrl; msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT; if (alx->hw.pdev->msix_enabled) { alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl); err = alx_request_msix(alx); if (!err) goto out; /* msix request failed, realloc resources */ err = alx_realloc_resources(alx); if (err) goto out; } if (alx->hw.pdev->msi_enabled) { alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl | ALX_MSI_MASK_SEL_LINE); err = request_irq(pci_irq_vector(pdev, 0), alx_intr_msi, 0, alx->dev->name, alx); if (!err) goto out; /* fall back to legacy interrupt */ pci_free_irq_vectors(alx->hw.pdev); } alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0); err = request_irq(pci_irq_vector(pdev, 0), alx_intr_legacy, IRQF_SHARED, alx->dev->name, alx); out: if (!err) alx_config_vector_mapping(alx); else netdev_err(alx->dev, "IRQ registration failed!\n"); return err; } static void alx_free_irq(struct alx_priv *alx) { struct pci_dev *pdev = alx->hw.pdev; int i; free_irq(pci_irq_vector(pdev, 0), alx); if (alx->hw.pdev->msix_enabled) { for (i = 0; i < alx->num_napi; i++) free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]); } pci_free_irq_vectors(pdev); } static int alx_identify_hw(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; int rev = alx_hw_revision(hw); if (rev > ALX_REV_C0) return -EINVAL; hw->max_dma_chnl = rev >= ALX_REV_B0 ? 
4 : 2; return 0; } static int alx_init_sw(struct alx_priv *alx) { struct pci_dev *pdev = alx->hw.pdev; struct alx_hw *hw = &alx->hw; int err; err = alx_identify_hw(alx); if (err) { dev_err(&pdev->dev, "unrecognized chip, aborting\n"); return err; } alx->hw.lnk_patch = pdev->device == ALX_DEV_ID_AR8161 && pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC && pdev->subsystem_device == 0x0091 && pdev->revision == 0; hw->smb_timer = 400; hw->mtu = alx->dev->mtu; alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); /* MTU range: 34 - 9256 */ alx->dev->min_mtu = 34; alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE); alx->tx_ringsz = 256; alx->rx_ringsz = 512; hw->imt = 200; alx->int_mask = ALX_ISR_MISC; hw->dma_chnl = hw->max_dma_chnl; hw->ith_tpd = alx->tx_ringsz / 3; hw->link_speed = SPEED_UNKNOWN; hw->duplex = DUPLEX_UNKNOWN; hw->adv_cfg = ADVERTISED_Autoneg | ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_1000baseT_Full; hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX; hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN | ALX_MAC_CTRL_MHASH_ALG_HI5B | ALX_MAC_CTRL_BRD_EN | ALX_MAC_CTRL_PCRCE | ALX_MAC_CTRL_CRCE | ALX_MAC_CTRL_RXFC_EN | ALX_MAC_CTRL_TXFC_EN | 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT; mutex_init(&alx->mtx); return 0; } static netdev_features_t alx_fix_features(struct net_device *netdev, netdev_features_t features) { if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE) features &= ~(NETIF_F_TSO | NETIF_F_TSO6); return features; } static void alx_netif_stop(struct alx_priv *alx) { int i; netif_trans_update(alx->dev); if (netif_carrier_ok(alx->dev)) { netif_carrier_off(alx->dev); netif_tx_disable(alx->dev); for (i = 0; i < alx->num_napi; i++) napi_disable(&alx->qnapi[i]->napi); } } static void alx_halt(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; lockdep_assert_held(&alx->mtx); alx_netif_stop(alx); hw->link_speed = SPEED_UNKNOWN; hw->duplex = DUPLEX_UNKNOWN; alx_reset_mac(hw); /* disable l0s/l1 */ alx_enable_aspm(hw, false, false); alx_irq_disable(alx); alx_free_buffers(alx); } static void alx_configure(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; alx_configure_basic(hw); alx_disable_rss(hw); __alx_set_rx_mode(alx->dev); alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); } static void alx_activate(struct alx_priv *alx) { lockdep_assert_held(&alx->mtx); /* hardware setting lost, restore it */ alx_reinit_rings(alx); alx_configure(alx); /* clear old interrupts */ alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); alx_irq_enable(alx); alx_schedule_link_check(alx); } static void alx_reinit(struct alx_priv *alx) { lockdep_assert_held(&alx->mtx); alx_halt(alx); alx_activate(alx); } static int alx_change_mtu(struct net_device *netdev, int mtu) { struct alx_priv *alx = netdev_priv(netdev); int max_frame = ALX_MAX_FRAME_LEN(mtu); netdev->mtu = mtu; alx->hw.mtu = mtu; alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); netdev_update_features(netdev); if (netif_running(netdev)) { mutex_lock(&alx->mtx); alx_reinit(alx); mutex_unlock(&alx->mtx); } return 0; } static void alx_netif_start(struct alx_priv *alx) { int i; netif_tx_wake_all_queues(alx->dev); for (i = 0; i < alx->num_napi; i++) napi_enable(&alx->qnapi[i]->napi); netif_carrier_on(alx->dev); } static int __alx_open(struct alx_priv *alx, bool resume) { int err; err = alx_enable_msix(alx); if (err < 0) { err = alx_init_intr(alx); if (err) return err; } if (!resume) netif_carrier_off(alx->dev); err = alx_alloc_napis(alx); if (err) goto out_disable_adv_intr; err = 
alx_alloc_rings(alx); if (err) goto out_free_rings; alx_configure(alx); err = alx_request_irq(alx); if (err) goto out_free_rings; /* must be called after alx_request_irq because the chip stops working * if we copy the dma addresses in alx_init_ring_ptrs twice when * requesting msi-x interrupts failed */ alx_reinit_rings(alx); netif_set_real_num_tx_queues(alx->dev, alx->num_txq); netif_set_real_num_rx_queues(alx->dev, alx->num_rxq); /* clear old interrupts */ alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); alx_irq_enable(alx); if (!resume) netif_tx_start_all_queues(alx->dev); alx_schedule_link_check(alx); return 0; out_free_rings: alx_free_rings(alx); alx_free_napis(alx); out_disable_adv_intr: pci_free_irq_vectors(alx->hw.pdev); return err; } static void __alx_stop(struct alx_priv *alx) { lockdep_assert_held(&alx->mtx); alx_free_irq(alx); cancel_work_sync(&alx->link_check_wk); cancel_work_sync(&alx->reset_wk); alx_halt(alx); alx_free_rings(alx); alx_free_napis(alx); } static const char *alx_speed_desc(struct alx_hw *hw) { switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) { case ADVERTISED_1000baseT_Full: return "1 Gbps Full"; case ADVERTISED_100baseT_Full: return "100 Mbps Full"; case ADVERTISED_100baseT_Half: return "100 Mbps Half"; case ADVERTISED_10baseT_Full: return "10 Mbps Full"; case ADVERTISED_10baseT_Half: return "10 Mbps Half"; default: return "Unknown speed"; } } static void alx_check_link(struct alx_priv *alx) { struct alx_hw *hw = &alx->hw; unsigned long flags; int old_speed; int err; lockdep_assert_held(&alx->mtx); /* clear PHY internal interrupt status, otherwise the main * interrupt status will be asserted forever */ alx_clear_phy_intr(hw); old_speed = hw->link_speed; err = alx_read_phy_link(hw); if (err < 0) goto reset; spin_lock_irqsave(&alx->irq_lock, flags); alx->int_mask |= ALX_ISR_PHY; alx_write_mem32(hw, ALX_IMR, alx->int_mask); spin_unlock_irqrestore(&alx->irq_lock, flags); if (old_speed == hw->link_speed) return; if (hw->link_speed != SPEED_UNKNOWN) { netif_info(alx, link, alx->dev, "NIC Up: %s\n", alx_speed_desc(hw)); alx_post_phy_link(hw); alx_enable_aspm(hw, true, true); alx_start_mac(hw); if (old_speed == SPEED_UNKNOWN) alx_netif_start(alx); } else { /* link is now down */ alx_netif_stop(alx); netif_info(alx, link, alx->dev, "Link Down\n"); err = alx_reset_mac(hw); if (err) goto reset; alx_irq_disable(alx); /* MAC reset causes all HW settings to be lost, restore all */ err = alx_reinit_rings(alx); if (err) goto reset; alx_configure(alx); alx_enable_aspm(hw, false, true); alx_post_phy_link(hw); alx_irq_enable(alx); } return; reset: alx_schedule_reset(alx); } static int alx_open(struct net_device *netdev) { struct alx_priv *alx = netdev_priv(netdev); int ret; mutex_lock(&alx->mtx); ret = __alx_open(alx, false); mutex_unlock(&alx->mtx); return ret; } static int alx_stop(struct net_device *netdev) { struct alx_priv *alx = netdev_priv(netdev); mutex_lock(&alx->mtx); __alx_stop(alx); mutex_unlock(&alx->mtx); return 0; } static void alx_link_check(struct work_struct *work) { struct alx_priv *alx; alx = container_of(work, struct alx_priv, link_check_wk); mutex_lock(&alx->mtx); alx_check_link(alx); mutex_unlock(&alx->mtx); } static void alx_reset(struct work_struct *work) { struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk); mutex_lock(&alx->mtx); alx_reinit(alx); mutex_unlock(&alx->mtx); } static int alx_tpd_req(struct sk_buff *skb) { int num; num = skb_shinfo(skb)->nr_frags + 1; /* we need one extra descriptor for LSOv2 */ if 
(skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) num++; return num; } static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) { u8 cso, css; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; cso = skb_checksum_start_offset(skb); if (cso & 1) return -EINVAL; css = cso + skb->csum_offset; first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT); first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT); first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT); return 0; } static int alx_tso(struct sk_buff *skb, struct alx_txd *first) { int err; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (!skb_is_gso(skb)) return 0; err = skb_cow_head(skb, 0); if (err < 0) return err; if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); first->word1 |= 1 << TPD_IPV4_SHIFT; } else if (skb_is_gso_v6(skb)) { tcp_v6_gso_csum_prep(skb); /* LSOv2: the first TPD only provides the packet length */ first->adrl.l.pkt_len = skb->len; first->word1 |= 1 << TPD_LSO_V2_SHIFT; } first->word1 |= 1 << TPD_LSO_EN_SHIFT; first->word1 |= (skb_transport_offset(skb) & TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT; first->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) << TPD_MSS_SHIFT; return 1; } static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb) { struct alx_txd *tpd, *first_tpd; dma_addr_t dma; int maplen, f, first_idx = txq->write_idx; first_tpd = &txq->tpd[txq->write_idx]; tpd = first_tpd; if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) { if (++txq->write_idx == txq->count) txq->write_idx = 0; tpd = &txq->tpd[txq->write_idx]; tpd->len = first_tpd->len; tpd->vlan_tag = first_tpd->vlan_tag; tpd->word1 = first_tpd->word1; } maplen = skb_headlen(skb); dma = dma_map_single(txq->dev, skb->data, maplen, DMA_TO_DEVICE); if (dma_mapping_error(txq->dev, dma)) goto err_dma; dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); tpd->adrl.addr = cpu_to_le64(dma); tpd->len = cpu_to_le16(maplen); for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; if (++txq->write_idx == txq->count) txq->write_idx = 0; tpd = &txq->tpd[txq->write_idx]; tpd->word1 = first_tpd->word1; maplen = skb_frag_size(frag); dma = skb_frag_dma_map(txq->dev, frag, 0, maplen, DMA_TO_DEVICE); if (dma_mapping_error(txq->dev, dma)) goto err_dma; dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); tpd->adrl.addr = cpu_to_le64(dma); tpd->len = cpu_to_le16(maplen); } /* last TPD, set EOP flag and store skb */ tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT); txq->bufs[txq->write_idx].skb = skb; if (++txq->write_idx == txq->count) txq->write_idx = 0; return 0; err_dma: f = first_idx; while (f != txq->write_idx) { alx_free_txbuf(txq, f); if (++f == txq->count) f = 0; } return -ENOMEM; } static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb, struct alx_tx_queue *txq) { struct alx_priv *alx; struct alx_txd *first; int tso; alx = netdev_priv(txq->netdev); if (alx_tpd_avail(txq) < alx_tpd_req(skb)) { netif_tx_stop_queue(alx_get_tx_queue(txq)); goto drop; } first = &txq->tpd[txq->write_idx]; memset(first, 0, sizeof(*first)); tso = alx_tso(skb, first); if (tso < 0) goto drop; else if (!tso && alx_tx_csum(skb, first)) goto drop; if (alx_map_tx_skb(txq, skb) < 0) goto drop; netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len); /* 
flush updates before updating hardware */ wmb(); alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx); if (alx_tpd_avail(txq) < txq->count / 8) netif_tx_stop_queue(alx_get_tx_queue(txq)); return NETDEV_TX_OK; drop: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static netdev_tx_t alx_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct alx_priv *alx = netdev_priv(netdev); return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb)); } static void alx_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct alx_priv *alx = netdev_priv(dev); alx_schedule_reset(alx); } static int alx_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; u16 val; int err; if (prtad != hw->mdio.prtad) return -EINVAL; if (devad == MDIO_DEVAD_NONE) err = alx_read_phy_reg(hw, addr, &val); else err = alx_read_phy_ext(hw, devad, addr, &val); if (err) return err; return val; } static int alx_mdio_write(struct net_device *netdev, int prtad, int devad, u16 addr, u16 val) { struct alx_priv *alx = netdev_priv(netdev); struct alx_hw *hw = &alx->hw; if (prtad != hw->mdio.prtad) return -EINVAL; if (devad == MDIO_DEVAD_NONE) return alx_write_phy_reg(hw, addr, val); return alx_write_phy_ext(hw, devad, addr, val); } static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct alx_priv *alx = netdev_priv(netdev); if (!netif_running(netdev)) return -EAGAIN; return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd); } #ifdef CONFIG_NET_POLL_CONTROLLER static void alx_poll_controller(struct net_device *netdev) { struct alx_priv *alx = netdev_priv(netdev); int i; if (alx->hw.pdev->msix_enabled) { alx_intr_msix_misc(0, alx); for (i = 0; i < alx->num_txq; i++) alx_intr_msix_ring(0, alx->qnapi[i]); } else if (alx->hw.pdev->msi_enabled) alx_intr_msi(0, alx); else alx_intr_legacy(0, alx); } #endif static void alx_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct alx_priv *alx = netdev_priv(dev); struct alx_hw_stats *hw_stats = &alx->hw.stats; spin_lock(&alx->stats_lock); alx_update_hw_stats(&alx->hw); net_stats->tx_bytes = hw_stats->tx_byte_cnt; net_stats->rx_bytes = hw_stats->rx_byte_cnt; net_stats->multicast = hw_stats->rx_mcast; net_stats->collisions = hw_stats->tx_single_col + hw_stats->tx_multi_col + hw_stats->tx_late_col + hw_stats->tx_abort_col; net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + hw_stats->rx_len_err + hw_stats->rx_ov_sz + hw_stats->rx_ov_rrd + hw_stats->rx_align_err + hw_stats->rx_ov_rxf; net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf; net_stats->rx_length_errors = hw_stats->rx_len_err; net_stats->rx_crc_errors = hw_stats->rx_fcs_err; net_stats->rx_frame_errors = hw_stats->rx_align_err; net_stats->rx_dropped = hw_stats->rx_ov_rrd; net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + hw_stats->tx_underrun + hw_stats->tx_trunc; net_stats->tx_aborted_errors = hw_stats->tx_abort_col; net_stats->tx_fifo_errors = hw_stats->tx_underrun; net_stats->tx_window_errors = hw_stats->tx_late_col; net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; spin_unlock(&alx->stats_lock); } static const struct net_device_ops alx_netdev_ops = { .ndo_open = alx_open, .ndo_stop = alx_stop, .ndo_start_xmit = alx_start_xmit, .ndo_get_stats64 = alx_get_stats64, .ndo_set_rx_mode = alx_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = 
alx_set_mac_address, .ndo_change_mtu = alx_change_mtu, .ndo_eth_ioctl = alx_ioctl, .ndo_tx_timeout = alx_tx_timeout, .ndo_fix_features = alx_fix_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = alx_poll_controller, #endif }; static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct alx_priv *alx; struct alx_hw *hw; bool phy_configured; int err; err = pci_enable_device_mem(pdev); if (err) return err; /* The alx chip can DMA to 64-bit addresses, but it uses a single * shared register for the high 32 bits, so only a single, aligned, * 4 GB physical address range can be used for descriptors. */ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); } else { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA config, aborting\n"); goto out_pci_disable; } } err = pci_request_mem_regions(pdev, alx_drv_name); if (err) { dev_err(&pdev->dev, "pci_request_mem_regions failed\n"); goto out_pci_disable; } pci_set_master(pdev); if (!pdev->pm_cap) { dev_err(&pdev->dev, "Can't find power management capability, aborting\n"); err = -EIO; goto out_pci_release; } netdev = alloc_etherdev_mqs(sizeof(*alx), ALX_MAX_TX_QUEUES, 1); if (!netdev) { err = -ENOMEM; goto out_pci_release; } SET_NETDEV_DEV(netdev, &pdev->dev); alx = netdev_priv(netdev); spin_lock_init(&alx->hw.mdio_lock); spin_lock_init(&alx->irq_lock); spin_lock_init(&alx->stats_lock); alx->dev = netdev; alx->hw.pdev = pdev; alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL; hw = &alx->hw; pci_set_drvdata(pdev, alx); hw->hw_addr = pci_ioremap_bar(pdev, 0); if (!hw->hw_addr) { dev_err(&pdev->dev, "cannot map device registers\n"); err = -EIO; goto out_free_netdev; } netdev->netdev_ops = &alx_netdev_ops; netdev->ethtool_ops = &alx_ethtool_ops; netdev->irq = pci_irq_vector(pdev, 0); netdev->watchdog_timeo = ALX_WATCHDOG_TIME; if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG) pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; err = alx_init_sw(alx); if (err) { dev_err(&pdev->dev, "net device private data init failed\n"); goto out_unmap; } mutex_lock(&alx->mtx); alx_reset_pcie(hw); phy_configured = alx_phy_configured(hw); if (!phy_configured) alx_reset_phy(hw); err = alx_reset_mac(hw); if (err) { dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err); goto out_unlock; } /* setup link to put it in a known good starting state */ if (!phy_configured) { err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); if (err) { dev_err(&pdev->dev, "failed to configure PHY speed/duplex (err=%d)\n", err); goto out_unlock; } } netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6; if (alx_get_perm_macaddr(hw, hw->perm_addr)) { dev_warn(&pdev->dev, "Invalid permanent address programmed, using random one\n"); eth_hw_addr_random(netdev); memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len); } memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); eth_hw_addr_set(netdev, hw->mac_addr); memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); hw->mdio.prtad = 0; hw->mdio.mmds = 0; hw->mdio.dev = netdev; hw->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22 | MDIO_EMULATE_C22; hw->mdio.mdio_read = alx_mdio_read; hw->mdio.mdio_write = alx_mdio_write; if (!alx_get_phy_info(hw)) { dev_err(&pdev->dev, "failed to identify PHY\n"); err = -EIO; goto out_unlock; } 
mutex_unlock(&alx->mtx); INIT_WORK(&alx->link_check_wk, alx_link_check); INIT_WORK(&alx->reset_wk, alx_reset); netif_carrier_off(netdev); err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "register netdevice failed\n"); goto out_unmap; } netdev_info(netdev, "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n", netdev->dev_addr); return 0; out_unlock: mutex_unlock(&alx->mtx); out_unmap: iounmap(hw->hw_addr); out_free_netdev: free_netdev(netdev); out_pci_release: pci_release_mem_regions(pdev); out_pci_disable: pci_disable_device(pdev); return err; } static void alx_remove(struct pci_dev *pdev) { struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; /* restore permanent mac address */ alx_set_macaddr(hw, hw->perm_addr); unregister_netdev(alx->dev); iounmap(hw->hw_addr); pci_release_mem_regions(pdev); pci_disable_device(pdev); mutex_destroy(&alx->mtx); free_netdev(alx->dev); } static int alx_suspend(struct device *dev) { struct alx_priv *alx = dev_get_drvdata(dev); if (!netif_running(alx->dev)) return 0; rtnl_lock(); netif_device_detach(alx->dev); mutex_lock(&alx->mtx); __alx_stop(alx); mutex_unlock(&alx->mtx); rtnl_unlock(); return 0; } static int alx_resume(struct device *dev) { struct alx_priv *alx = dev_get_drvdata(dev); struct alx_hw *hw = &alx->hw; int err; rtnl_lock(); mutex_lock(&alx->mtx); alx_reset_phy(hw); if (!netif_running(alx->dev)) { err = 0; goto unlock; } err = __alx_open(alx, true); if (err) goto unlock; netif_device_attach(alx->dev); unlock: mutex_unlock(&alx->mtx); rtnl_unlock(); return err; } static DEFINE_SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct alx_priv *alx = pci_get_drvdata(pdev); struct net_device *netdev = alx->dev; pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET; dev_info(&pdev->dev, "pci error detected\n"); mutex_lock(&alx->mtx); if (netif_running(netdev)) { netif_device_detach(netdev); alx_halt(alx); } if (state == pci_channel_io_perm_failure) rc = PCI_ERS_RESULT_DISCONNECT; else pci_disable_device(pdev); mutex_unlock(&alx->mtx); return rc; } static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) { struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; dev_info(&pdev->dev, "pci error slot reset\n"); mutex_lock(&alx->mtx); if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n"); goto out; } pci_set_master(pdev); alx_reset_pcie(hw); if (!alx_reset_mac(hw)) rc = PCI_ERS_RESULT_RECOVERED; out: mutex_unlock(&alx->mtx); return rc; } static void alx_pci_error_resume(struct pci_dev *pdev) { struct alx_priv *alx = pci_get_drvdata(pdev); struct net_device *netdev = alx->dev; dev_info(&pdev->dev, "pci error resume\n"); mutex_lock(&alx->mtx); if (netif_running(netdev)) { alx_activate(alx); netif_device_attach(netdev); } mutex_unlock(&alx->mtx); } static const struct pci_error_handlers alx_err_handlers = { .error_detected = alx_pci_error_detected, .slot_reset = alx_pci_error_slot_reset, .resume = alx_pci_error_resume, }; static const struct pci_device_id alx_pci_tbl[] = { { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500), 
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) }, {} }; static struct pci_driver alx_driver = { .name = alx_drv_name, .id_table = alx_pci_tbl, .probe = alx_probe, .remove = alx_remove, .err_handler = &alx_err_handlers, .driver.pm = pm_sleep_ptr(&alx_pm_ops), }; module_pci_driver(alx_driver); MODULE_DEVICE_TABLE(pci, alx_pci_tbl); MODULE_AUTHOR("Johannes Berg <[email protected]>"); MODULE_AUTHOR("Qualcomm Corporation"); MODULE_DESCRIPTION( "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver"); MODULE_LICENSE("GPL");
linux-master
drivers/net/ethernet/atheros/alx/main.c
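Both the alx file above and the atl1c file below manage their Tx/Rx descriptors as circular rings with a producer index (write_idx / next_to_use) and a consumer index (read_idx / next_to_clean), keeping one slot unused so that a full ring and an empty ring remain distinguishable. The standalone sketch below is an editorial illustration only, not part of either driver: the struct and function names are hypothetical, and it simply mirrors the free-slot arithmetic used by alx_tpd_avail() and the wrap-around index stepping that both drivers perform. It is self-contained and can be compiled and run on its own.

/* Editorial sketch (hypothetical names): circular descriptor-ring
 * index arithmetic of the kind alx_tpd_avail() relies on.
 */
#include <assert.h>
#include <stdio.h>

struct ring {
	unsigned int count;     /* total descriptor slots */
	unsigned int read_idx;  /* next slot the consumer will clean */
	unsigned int write_idx; /* next slot the producer will fill */
};

/* Free slots, leaving one slot unused so write_idx == read_idx
 * unambiguously means "ring empty" (same formula as alx_tpd_avail()).
 */
static unsigned int ring_avail(const struct ring *r)
{
	if (r->write_idx >= r->read_idx)
		return r->count + r->read_idx - r->write_idx - 1;
	return r->read_idx - r->write_idx - 1;
}

/* Advance an index with wrap-around, as the drivers do with
 * "if (++idx == count) idx = 0;".
 */
static unsigned int ring_next(const struct ring *r, unsigned int idx)
{
	return ++idx == r->count ? 0 : idx;
}

int main(void)
{
	struct ring r = { .count = 8, .read_idx = 0, .write_idx = 0 };

	assert(ring_avail(&r) == 7);              /* empty ring: count - 1 free */

	r.write_idx = ring_next(&r, r.write_idx); /* producer posts one descriptor */
	assert(ring_avail(&r) == 6);

	r.read_idx = ring_next(&r, r.read_idx);   /* consumer reclaims it */
	assert(ring_avail(&r) == 7);

	printf("free slots: %u\n", ring_avail(&r));
	return 0;
}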
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. */ #include "atl1c.h" char atl1c_driver_name[] = "atl1c"; /* * atl1c_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static const struct pci_device_id atl1c_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)}, /* required last entry */ { 0 } }; MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl); MODULE_AUTHOR("Jie Yang"); MODULE_AUTHOR("Qualcomm Atheros Inc."); MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver"); MODULE_LICENSE("GPL"); struct atl1c_qregs { u16 tpd_addr_lo; u16 tpd_prod; u16 tpd_cons; u16 rfd_addr_lo; u16 rrd_addr_lo; u16 rfd_prod; u32 tx_isr; u32 rx_isr; }; static struct atl1c_qregs atl1c_qregs[AT_MAX_TRANSMIT_QUEUE] = { { REG_TPD_PRI0_ADDR_LO, REG_TPD_PRI0_PIDX, REG_TPD_PRI0_CIDX, REG_RFD0_HEAD_ADDR_LO, REG_RRD0_HEAD_ADDR_LO, REG_MB_RFD0_PROD_IDX, ISR_TX_PKT_0, ISR_RX_PKT_0 }, { REG_TPD_PRI1_ADDR_LO, REG_TPD_PRI1_PIDX, REG_TPD_PRI1_CIDX, REG_RFD1_HEAD_ADDR_LO, REG_RRD1_HEAD_ADDR_LO, REG_MB_RFD1_PROD_IDX, ISR_TX_PKT_1, ISR_RX_PKT_1 }, { REG_TPD_PRI2_ADDR_LO, REG_TPD_PRI2_PIDX, REG_TPD_PRI2_CIDX, REG_RFD2_HEAD_ADDR_LO, REG_RRD2_HEAD_ADDR_LO, REG_MB_RFD2_PROD_IDX, ISR_TX_PKT_2, ISR_RX_PKT_2 }, { REG_TPD_PRI3_ADDR_LO, REG_TPD_PRI3_PIDX, REG_TPD_PRI3_CIDX, REG_RFD3_HEAD_ADDR_LO, REG_RRD3_HEAD_ADDR_LO, REG_MB_RFD3_PROD_IDX, ISR_TX_PKT_3, ISR_RX_PKT_3 }, }; static int atl1c_stop_mac(struct atl1c_hw *hw); static void atl1c_disable_l0s_l1(struct atl1c_hw *hw); static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed); static void atl1c_start_mac(struct atl1c_adapter *adapter); static int atl1c_up(struct atl1c_adapter *adapter); static void atl1c_down(struct atl1c_adapter *adapter); static int atl1c_reset_mac(struct atl1c_hw *hw); static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter); static int atl1c_configure(struct atl1c_adapter *adapter); static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue, bool napi_mode); static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; static void atl1c_pcie_patch(struct atl1c_hw *hw) { u32 mst_data, data; /* pclk sel could switch to 25M */ AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data); mst_data &= ~MASTER_CTRL_CLK_SEL_DIS; AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data); /* WoL/PCIE related settings */ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); data |= PCIE_PHYMISC_FORCE_RCV_DET; AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); } else { /* new dev set bit5 of MASTER */ if (!(mst_data & MASTER_CTRL_WAKEN_25M)) AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data | MASTER_CTRL_WAKEN_25M); } /* aspm/PCIE setting only for l2cb 1.0 */ if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data); data = 
FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW, L2CB1_PCIE_PHYMISC2_CDR_BW); data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH, L2CB1_PCIE_PHYMISC2_L0S_TH); AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data); /* extend L1 sync timer */ AT_READ_REG(hw, REG_LINK_CTRL, &data); data |= LINK_CTRL_EXT_SYNC; AT_WRITE_REG(hw, REG_LINK_CTRL, data); } /* l2cb 1.x & l1d 1.x */ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) { AT_READ_REG(hw, REG_PM_CTRL, &data); data |= PM_CTRL_L0S_BUFSRX_EN; AT_WRITE_REG(hw, REG_PM_CTRL, data); /* clear vendor msg */ AT_READ_REG(hw, REG_DMA_DBG, &data); AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG); } } /* FIXME: no need any more ? */ /* * atl1c_init_pcie - init PCIE module */ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) { u32 data; u32 pci_cmd; struct pci_dev *pdev = hw->adapter->pdev; int pos; AT_READ_REG(hw, PCI_COMMAND, &pci_cmd); pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_IO); AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd); /* * Clear any PowerSaveing Settings */ pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); /* wol sts read-clear */ AT_READ_REG(hw, REG_WOL_CTRL, &data); AT_WRITE_REG(hw, REG_WOL_CTRL, 0); /* * Mask some pcie error bits */ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); if (pos) { pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); } /* clear error status */ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_NFED | PCI_EXP_DEVSTA_FED | PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_URD); AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data); data &= ~LTSSM_ID_EN_WRO; AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data); atl1c_pcie_patch(hw); if (flag & ATL1C_PCIE_L0S_L1_DISABLE) atl1c_disable_l0s_l1(hw); msleep(5); } /** * atl1c_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ static inline void atl1c_irq_enable(struct atl1c_adapter *adapter) { if (likely(atomic_dec_and_test(&adapter->irq_sem))) { AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF); AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); AT_WRITE_FLUSH(&adapter->hw); } } /** * atl1c_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter) { atomic_inc(&adapter->irq_sem); AT_WRITE_REG(&adapter->hw, REG_IMR, 0); AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT); AT_WRITE_FLUSH(&adapter->hw); synchronize_irq(adapter->pdev->irq); } /* * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads * of the idle status register until the device is actually idle */ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl) { int timeout; u32 data; for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { AT_READ_REG(hw, REG_IDLE_STATUS, &data); if ((data & modu_ctrl) == 0) return 0; msleep(1); } return data; } /** * atl1c_phy_config - Timer Call-back * @t: timer list containing pointer to netdev cast into an unsigned long */ static void atl1c_phy_config(struct timer_list *t) { struct atl1c_adapter *adapter = from_timer(adapter, t, phy_config_timer); struct atl1c_hw *hw = &adapter->hw; unsigned long flags; spin_lock_irqsave(&adapter->mdio_lock, flags); atl1c_restart_autoneg(hw); spin_unlock_irqrestore(&adapter->mdio_lock, flags); } void atl1c_reinit_locked(struct atl1c_adapter *adapter) { atl1c_down(adapter); 
atl1c_up(adapter); clear_bit(__AT_RESETTING, &adapter->flags); } static void atl1c_check_link_status(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; int err; unsigned long flags; u16 speed, duplex; bool link; spin_lock_irqsave(&adapter->mdio_lock, flags); link = atl1c_get_link_status(hw); spin_unlock_irqrestore(&adapter->mdio_lock, flags); if (!link) { /* link down */ netif_carrier_off(netdev); hw->hibernate = true; if (atl1c_reset_mac(hw) != 0) if (netif_msg_hw(adapter)) dev_warn(&pdev->dev, "reset mac failed\n"); atl1c_set_aspm(hw, SPEED_0); atl1c_post_phy_linkchg(hw, SPEED_0); atl1c_reset_dma_ring(adapter); atl1c_configure(adapter); } else { /* Link Up */ hw->hibernate = false; spin_lock_irqsave(&adapter->mdio_lock, flags); err = atl1c_get_speed_and_duplex(hw, &speed, &duplex); spin_unlock_irqrestore(&adapter->mdio_lock, flags); if (unlikely(err)) return; /* link result is our setting */ if (adapter->link_speed != speed || adapter->link_duplex != duplex) { adapter->link_speed = speed; adapter->link_duplex = duplex; atl1c_set_aspm(hw, speed); atl1c_post_phy_linkchg(hw, speed); atl1c_start_mac(adapter); if (netif_msg_link(adapter)) dev_info(&pdev->dev, "%s: %s NIC Link is Up<%d Mbps %s>\n", atl1c_driver_name, netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? "Full Duplex" : "Half Duplex"); } if (!netif_carrier_ok(netdev)) netif_carrier_on(netdev); } } static void atl1c_link_chg_event(struct atl1c_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; bool link; spin_lock(&adapter->mdio_lock); link = atl1c_get_link_status(&adapter->hw); spin_unlock(&adapter->mdio_lock); /* notify upper layer link down ASAP */ if (!link) { if (netif_carrier_ok(netdev)) { /* old link state: Up */ netif_carrier_off(netdev); if (netif_msg_link(adapter)) dev_info(&pdev->dev, "%s: %s NIC Link is Down\n", atl1c_driver_name, netdev->name); adapter->link_speed = SPEED_0; } } set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event); schedule_work(&adapter->common_task); } static void atl1c_common_task(struct work_struct *work) { struct atl1c_adapter *adapter; struct net_device *netdev; adapter = container_of(work, struct atl1c_adapter, common_task); netdev = adapter->netdev; if (test_bit(__AT_DOWN, &adapter->flags)) return; if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { netif_device_detach(netdev); atl1c_down(adapter); atl1c_up(adapter); netif_device_attach(netdev); } if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event)) { atl1c_irq_disable(adapter); atl1c_check_link_status(adapter); atl1c_irq_enable(adapter); } } static void atl1c_del_timer(struct atl1c_adapter *adapter) { del_timer_sync(&adapter->phy_config_timer); } /** * atl1c_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: index of hanging tx queue */ static void atl1c_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct atl1c_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); schedule_work(&adapter->common_task); } /** * atl1c_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * * The set_multi entry point is called whenever the multicast address * list or the network interface flags are updated. 
This routine is * responsible for configuring the hardware for proper multicast, * promiscuous mode, and all-multi behavior. */ static void atl1c_set_multi(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u32 mac_ctrl_data; u32 hash_value; /* Check for Promiscuous and All Multicast modes */ AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data); if (netdev->flags & IFF_PROMISC) { mac_ctrl_data |= MAC_CTRL_PROMIS_EN; } else if (netdev->flags & IFF_ALLMULTI) { mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN; } else { mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); } AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); /* clear the old settings from the multicast hash table */ AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ netdev_for_each_mc_addr(ha, netdev) { hash_value = atl1c_hash_mc_addr(hw, ha->addr); atl1c_hash_set(hw, hash_value); } } static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) { if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; } else { /* disable VLAN tag insert/strip */ *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; } } static void atl1c_vlan_mode(struct net_device *netdev, netdev_features_t features) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; u32 mac_ctrl_data = 0; if (netif_msg_pktdata(adapter)) dev_dbg(&pdev->dev, "atl1c_vlan_mode\n"); atl1c_irq_disable(adapter); AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data); __atl1c_vlan_mode(features, &mac_ctrl_data); AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); atl1c_irq_enable(adapter); } static void atl1c_restore_vlan(struct atl1c_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; if (netif_msg_pktdata(adapter)) dev_dbg(&pdev->dev, "atl1c_restore_vlan\n"); atl1c_vlan_mode(adapter->netdev, adapter->netdev->features); } /** * atl1c_set_mac_addr - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure */ static int atl1c_set_mac_addr(struct net_device *netdev, void *p) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (netif_running(netdev)) return -EBUSY; eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); return 0; } static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, struct net_device *dev) { unsigned int head_size; int mtu = dev->mtu; adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); adapter->rx_frag_size = roundup_pow_of_two(head_size); } static netdev_features_t atl1c_fix_features(struct net_device *netdev, netdev_features_t features) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; /* * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. 
*/ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; if (hw->nic_type != athr_mt) { if (netdev->mtu > MAX_TSO_FRAME_SIZE) features &= ~(NETIF_F_TSO | NETIF_F_TSO6); } return features; } static int atl1c_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_CTAG_RX) atl1c_vlan_mode(netdev, features); return 0; } static void atl1c_set_max_mtu(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; switch (hw->nic_type) { /* These (GbE) devices support jumbo packets, max_mtu 6122 */ case athr_l1c: case athr_l1d: case athr_l1d_2: netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); break; case athr_mt: netdev->max_mtu = 9500; break; /* The 10/100 devices don't support jumbo packets, max_mtu 1500 */ default: netdev->max_mtu = ETH_DATA_LEN; break; } } /** * atl1c_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure */ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) { struct atl1c_adapter *adapter = netdev_priv(netdev); /* set MTU */ if (netif_running(netdev)) { while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) msleep(1); netdev->mtu = new_mtu; adapter->hw.max_frame_size = new_mtu; atl1c_set_rxbufsize(adapter, netdev); atl1c_down(adapter); netdev_update_features(netdev); atl1c_up(adapter); clear_bit(__AT_RESETTING, &adapter->flags); } return 0; } /* * caller should hold mdio_lock */ static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num) { struct atl1c_adapter *adapter = netdev_priv(netdev); u16 result; atl1c_read_phy_reg(&adapter->hw, reg_num, &result); return result; } static void atl1c_mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) { struct atl1c_adapter *adapter = netdev_priv(netdev); atl1c_write_phy_reg(&adapter->hw, reg_num, val); } static int atl1c_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; struct mii_ioctl_data *data = if_mii(ifr); unsigned long flags; int retval = 0; if (!netif_running(netdev)) return -EINVAL; spin_lock_irqsave(&adapter->mdio_lock, flags); switch (cmd) { case SIOCGMIIPHY: data->phy_id = 0; break; case SIOCGMIIREG: if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, &data->val_out)) { retval = -EIO; goto out; } break; case SIOCSMIIREG: if (data->reg_num & ~(0x1F)) { retval = -EFAULT; goto out; } dev_dbg(&pdev->dev, "<atl1c_mii_ioctl> write %x %x", data->reg_num, data->val_in); if (atl1c_write_phy_reg(&adapter->hw, data->reg_num, data->val_in)) { retval = -EIO; goto out; } break; default: retval = -EOPNOTSUPP; break; } out: spin_unlock_irqrestore(&adapter->mdio_lock, flags); return retval; } static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return atl1c_mii_ioctl(netdev, ifr, cmd); default: return -EOPNOTSUPP; } } /** * atl1c_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * */ static int atl1c_alloc_queues(struct atl1c_adapter *adapter) { return 0; } static enum atl1c_nic_type atl1c_get_mac_type(struct pci_dev *pdev, u8 __iomem *hw_addr) { switch 
(pdev->device) { case PCI_DEVICE_ID_ATTANSIC_L2C: return athr_l2c; case PCI_DEVICE_ID_ATTANSIC_L1C: return athr_l1c; case PCI_DEVICE_ID_ATHEROS_L2C_B: return athr_l2c_b; case PCI_DEVICE_ID_ATHEROS_L2C_B2: return athr_l2c_b2; case PCI_DEVICE_ID_ATHEROS_L1D: return athr_l1d; case PCI_DEVICE_ID_ATHEROS_L1D_2_0: if (readl(hw_addr + REG_MT_MAGIC) == MT_MAGIC) return athr_mt; return athr_l1d_2; default: return athr_l1c; } } static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) { u32 link_ctrl_data; AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | ATL1C_TXQ_MODE_ENHANCE; hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT; hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; if (hw->nic_type == athr_l1c || hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2) hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; return 0; } struct atl1c_platform_patch { u16 pci_did; u8 pci_revid; u16 subsystem_vid; u16 subsystem_did; u32 patch_flag; #define ATL1C_LINK_PATCH 0x1 }; static const struct atl1c_platform_patch plats[] = { {0x2060, 0xC1, 0x1019, 0x8152, 0x1}, {0x2060, 0xC1, 0x1019, 0x2060, 0x1}, {0x2060, 0xC1, 0x1019, 0xE000, 0x1}, {0x2062, 0xC0, 0x1019, 0x8152, 0x1}, {0x2062, 0xC0, 0x1019, 0x2062, 0x1}, {0x2062, 0xC0, 0x1458, 0xE000, 0x1}, {0x2062, 0xC1, 0x1019, 0x8152, 0x1}, {0x2062, 0xC1, 0x1019, 0x2062, 0x1}, {0x2062, 0xC1, 0x1458, 0xE000, 0x1}, {0x2062, 0xC1, 0x1565, 0x2802, 0x1}, {0x2062, 0xC1, 0x1565, 0x2801, 0x1}, {0x1073, 0xC0, 0x1019, 0x8151, 0x1}, {0x1073, 0xC0, 0x1019, 0x1073, 0x1}, {0x1073, 0xC0, 0x1458, 0xE000, 0x1}, {0x1083, 0xC0, 0x1458, 0xE000, 0x1}, {0x1083, 0xC0, 0x1019, 0x8151, 0x1}, {0x1083, 0xC0, 0x1019, 0x1083, 0x1}, {0x1083, 0xC0, 0x1462, 0x7680, 0x1}, {0x1083, 0xC0, 0x1565, 0x2803, 0x1}, {0}, }; static void atl1c_patch_assign(struct atl1c_hw *hw) { struct pci_dev *pdev = hw->adapter->pdev; u32 misc_ctrl; int i = 0; hw->msi_lnkpatch = false; while (plats[i].pci_did != 0) { if (plats[i].pci_did == hw->device_id && plats[i].pci_revid == hw->revision_id && plats[i].subsystem_vid == hw->subsystem_vendor_id && plats[i].subsystem_did == hw->subsystem_id) { if (plats[i].patch_flag & ATL1C_LINK_PATCH) hw->msi_lnkpatch = true; } i++; } if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 && hw->revision_id == L2CB_V21) { /* config access mode */ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, REG_PCIE_DEV_MISC_CTRL); pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl); misc_ctrl &= ~0x100; pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, REG_PCIE_DEV_MISC_CTRL); pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl); } } /** * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter) * @adapter: board private structure to initialize * * atl1c_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). 
*/ static int atl1c_sw_init(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; u32 revision; int i; adapter->wol = 0; device_set_wakeup_enable(&pdev->dev, false); adapter->link_speed = SPEED_0; adapter->link_duplex = FULL_DUPLEX; adapter->tpd_ring[0].count = 1024; adapter->rfd_ring[0].count = 512; hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_id = pdev->subsystem_device; pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision); hw->revision_id = revision & 0xFF; /* before link up, we assume hibernate is true */ hw->hibernate = true; hw->media_type = MEDIA_TYPE_AUTO_SENSOR; if (atl1c_setup_mac_funcs(hw) != 0) { dev_err(&pdev->dev, "set mac function pointers failed\n"); return -1; } atl1c_patch_assign(hw); hw->intr_mask = IMR_NORMAL_MASK; for (i = 0; i < adapter->tx_queue_count; ++i) hw->intr_mask |= atl1c_qregs[i].tx_isr; for (i = 0; i < adapter->rx_queue_count; ++i) hw->intr_mask |= atl1c_qregs[i].rx_isr; hw->phy_configured = false; hw->preamble_len = 7; hw->max_frame_size = adapter->netdev->mtu; hw->autoneg_advertised = ADVERTISED_Autoneg; hw->indirect_tab = 0xE4E4E4E4; hw->base_cpu = 0; hw->ict = 50000; /* 100ms */ hw->smb_timer = 200000; /* 400ms */ hw->rx_imt = 200; hw->tx_imt = 1000; hw->tpd_burst = 5; hw->rfd_burst = 8; hw->dma_order = atl1c_dma_ord_out; hw->dmar_block = atl1c_dma_req_1024; if (atl1c_alloc_queues(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } /* TODO */ atl1c_set_rxbufsize(adapter, adapter->netdev); atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->mdio_lock); spin_lock_init(&adapter->hw.intr_mask_lock); set_bit(__AT_DOWN, &adapter->flags); return 0; } static inline void atl1c_clean_buffer(struct pci_dev *pdev, struct atl1c_buffer *buffer_info) { u16 pci_driection; if (buffer_info->flags & ATL1C_BUFFER_FREE) return; if (buffer_info->dma) { if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE) pci_driection = DMA_FROM_DEVICE; else pci_driection = DMA_TO_DEVICE; if (buffer_info->flags & ATL1C_PCIMAP_SINGLE) dma_unmap_single(&pdev->dev, buffer_info->dma, buffer_info->length, pci_driection); else if (buffer_info->flags & ATL1C_PCIMAP_PAGE) dma_unmap_page(&pdev->dev, buffer_info->dma, buffer_info->length, pci_driection); } if (buffer_info->skb) dev_consume_skb_any(buffer_info->skb); buffer_info->dma = 0; buffer_info->skb = NULL; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); } /** * atl1c_clean_tx_ring - Free Tx-skb * @adapter: board private structure * @queue: idx of transmit queue */ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, u32 queue) { struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue]; struct atl1c_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; u16 index, ring_count; ring_count = tpd_ring->count; for (index = 0; index < ring_count; index++) { buffer_info = &tpd_ring->buffer_info[index]; atl1c_clean_buffer(pdev, buffer_info); } netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue)); /* Zero out Tx-buffers */ memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * ring_count); atomic_set(&tpd_ring->next_to_clean, 0); tpd_ring->next_to_use = 0; } /** * atl1c_clean_rx_ring - Free rx-reservation skbs * @adapter: board private structure * @queue: idx of transmit queue */ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue) { struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue]; struct 
atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue]; struct atl1c_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; int j; for (j = 0; j < rfd_ring->count; j++) { buffer_info = &rfd_ring->buffer_info[j]; atl1c_clean_buffer(pdev, buffer_info); } /* zero out the descriptor ring */ memset(rfd_ring->desc, 0, rfd_ring->size); rfd_ring->next_to_clean = 0; rfd_ring->next_to_use = 0; rrd_ring->next_to_use = 0; rrd_ring->next_to_clean = 0; } /* * Read / Write Ptr Initialize: */ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) { struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; struct atl1c_buffer *buffer_info; int i, j; for (i = 0; i < adapter->tx_queue_count; i++) { tpd_ring[i].next_to_use = 0; atomic_set(&tpd_ring[i].next_to_clean, 0); buffer_info = tpd_ring[i].buffer_info; for (j = 0; j < tpd_ring->count; j++) ATL1C_SET_BUFFER_STATE(&buffer_info[i], ATL1C_BUFFER_FREE); } for (i = 0; i < adapter->rx_queue_count; i++) { rfd_ring[i].next_to_use = 0; rfd_ring[i].next_to_clean = 0; rrd_ring[i].next_to_use = 0; rrd_ring[i].next_to_clean = 0; for (j = 0; j < rfd_ring[i].count; j++) { buffer_info = &rfd_ring[i].buffer_info[j]; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); } } } /** * atl1c_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * * Free all transmit software resources */ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int i; dma_free_coherent(&pdev->dev, adapter->ring_header.size, adapter->ring_header.desc, adapter->ring_header.dma); adapter->ring_header.desc = NULL; /* Note: just free tdp_ring.buffer_info, * it contain rfd_ring.buffer_info, do not double free */ if (adapter->tpd_ring[0].buffer_info) { kfree(adapter->tpd_ring[0].buffer_info); adapter->tpd_ring[0].buffer_info = NULL; } for (i = 0; i < adapter->rx_queue_count; ++i) { if (adapter->rrd_ring[i].rx_page) { put_page(adapter->rrd_ring[i].rx_page); adapter->rrd_ring[i].rx_page = NULL; } } } /** * atl1c_setup_ring_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; struct atl1c_ring_header *ring_header = &adapter->ring_header; int tqc = adapter->tx_queue_count; int rqc = adapter->rx_queue_count; int size; int i; int count = 0; u32 offset = 0; /* Even though only one tpd queue is actually used, the "high" * priority tpd queue also gets initialized */ if (tqc == 1) tqc = 2; for (i = 1; i < tqc; i++) tpd_ring[i].count = tpd_ring[0].count; size = sizeof(struct atl1c_buffer) * (tpd_ring->count * tqc + rfd_ring->count * rqc); tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); if (unlikely(!tpd_ring->buffer_info)) goto err_nomem; for (i = 0; i < tqc; i++) { tpd_ring[i].adapter = adapter; tpd_ring[i].num = i; tpd_ring[i].buffer_info = (tpd_ring->buffer_info + count); count += tpd_ring[i].count; } for (i = 0; i < rqc; i++) { rrd_ring[i].adapter = adapter; rrd_ring[i].num = i; rrd_ring[i].count = rfd_ring[0].count; rfd_ring[i].count = rfd_ring[0].count; rfd_ring[i].buffer_info = (tpd_ring->buffer_info + count); count += rfd_ring->count; } /* * real ring DMA 
buffer * each ring/block may need up to 8 bytes for alignment, hence the * additional bytes tacked onto the end. */ ring_header->size = sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc + sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc + sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc + 8 * 4; ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size, &ring_header->dma, GFP_KERNEL); if (unlikely(!ring_header->desc)) { dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); goto err_nomem; } /* init TPD ring */ tpd_ring[0].dma = roundup(ring_header->dma, 8); offset = tpd_ring[0].dma - ring_header->dma; for (i = 0; i < tqc; i++) { tpd_ring[i].dma = ring_header->dma + offset; tpd_ring[i].desc = (u8 *)ring_header->desc + offset; tpd_ring[i].size = sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count; offset += roundup(tpd_ring[i].size, 8); } for (i = 0; i < rqc; i++) { /* init RFD ring */ rfd_ring[i].dma = ring_header->dma + offset; rfd_ring[i].desc = (u8 *)ring_header->desc + offset; rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) * rfd_ring[i].count; offset += roundup(rfd_ring[i].size, 8); /* init RRD ring */ rrd_ring[i].dma = ring_header->dma + offset; rrd_ring[i].desc = (u8 *)ring_header->desc + offset; rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) * rrd_ring[i].count; offset += roundup(rrd_ring[i].size, 8); } return 0; err_nomem: kfree(tpd_ring->buffer_info); return -ENOMEM; } static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; int i; int tx_queue_count = adapter->tx_queue_count; if (tx_queue_count == 1) tx_queue_count = 2; /* TPD */ AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, (u32)((tpd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32)); /* just enable normal priority TX queue */ for (i = 0; i < tx_queue_count; i++) { AT_WRITE_REG(hw, atl1c_qregs[i].tpd_addr_lo, (u32)(tpd_ring[i].dma & AT_DMA_LO_ADDR_MASK)); } AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK)); /* RFD */ AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI, (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32)); for (i = 0; i < adapter->rx_queue_count; i++) { AT_WRITE_REG(hw, atl1c_qregs[i].rfd_addr_lo, (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK)); } AT_WRITE_REG(hw, REG_RFD_RING_SIZE, rfd_ring->count & RFD_RING_SIZE_MASK); AT_WRITE_REG(hw, REG_RX_BUF_SIZE, adapter->rx_buffer_len & RX_BUF_SIZE_MASK); /* RRD */ for (i = 0; i < adapter->rx_queue_count; i++) { AT_WRITE_REG(hw, atl1c_qregs[i].rrd_addr_lo, (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK)); } AT_WRITE_REG(hw, REG_RRD_RING_SIZE, (rrd_ring->count & RRD_RING_SIZE_MASK)); if (hw->nic_type == athr_l2c_b) { AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L); AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L); AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L); AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L); AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/ AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/ } /* Load all of base address above */ AT_WRITE_REG(hw, REG_LOAD_PTR, 1); } static void atl1c_configure_tx(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; int max_pay_load; u16 tx_offload_thresh; u32 txq_ctrl_data; tx_offload_thresh = MAX_TSO_FRAME_SIZE; 
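	/* The TSO offload threshold register appears to hold the value in
	 * 8-byte units, hence the >> 3 in the write below.
	 */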
AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH, (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK); max_pay_load = pcie_get_readrq(adapter->pdev) >> 8; hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); /* * if BIOS had changed the dam-read-max-length to an invalid value, * restore it to default value */ if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) { pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN); hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN; } txq_ctrl_data = hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ? L2CB_TXQ_CFGV : L1C_TXQ_CFGV; AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); } static void atl1c_configure_rx(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; u32 rxq_ctrl_data; rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) << RXQ_RFD_BURST_NUM_SHIFT; if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM) rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN; /* aspm for gigabit */ if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0) rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT, ASPM_THRUPUT_LIMIT_100M); AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); } static void atl1c_configure_dma(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; u32 dma_ctrl_data; dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) | DMA_CTRL_RREQ_PRI_DATA | FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) | FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) | FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF); AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); } /* * Stop the mac, transmit and receive units * hw - Struct containing variables accessed by shared code * return : 0 or idle status (if error) */ static int atl1c_stop_mac(struct atl1c_hw *hw) { u32 data; AT_READ_REG(hw, REG_RXQ_CTRL, &data); data &= ~RXQ_CTRL_EN; AT_WRITE_REG(hw, REG_RXQ_CTRL, data); AT_READ_REG(hw, REG_TXQ_CTRL, &data); data &= ~TXQ_CTRL_EN; AT_WRITE_REG(hw, REG_TXQ_CTRL, data); atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY); AT_READ_REG(hw, REG_MAC_CTRL, &data); data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN); AT_WRITE_REG(hw, REG_MAC_CTRL, data); return (int)atl1c_wait_until_idle(hw, IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY); } static void atl1c_start_mac(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; u32 mac, txq, rxq; hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX; hw->mac_speed = adapter->link_speed == SPEED_1000 ? atl1c_mac_speed_1000 : atl1c_mac_speed_10_100; AT_READ_REG(hw, REG_TXQ_CTRL, &txq); AT_READ_REG(hw, REG_RXQ_CTRL, &rxq); AT_READ_REG(hw, REG_MAC_CTRL, &mac); txq |= TXQ_CTRL_EN; rxq |= RXQ_CTRL_EN; mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW | MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW | MAC_CTRL_ADD_CRC | MAC_CTRL_PAD | MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN | MAC_CTRL_HASH_ALG_CRC32; if (hw->mac_duplex) mac |= MAC_CTRL_DUPLX; else mac &= ~MAC_CTRL_DUPLX; mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed); mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len); AT_WRITE_REG(hw, REG_TXQ_CTRL, txq); AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq); AT_WRITE_REG(hw, REG_MAC_CTRL, mac); } /* * Reset the transmit and receive units; mask and clear all interrupts. * hw - Struct containing variables accessed by shared code * return : 0 or idle status (if error) */ static int atl1c_reset_mac(struct atl1c_hw *hw) { struct atl1c_adapter *adapter = hw->adapter; struct pci_dev *pdev = adapter->pdev; u32 ctrl_data = 0; atl1c_stop_mac(hw); /* * Issue Soft Reset to the MAC. 
This will reset the chip's * transmit, receive, DMA. It will not effect * the current PCI configuration. The global reset bit is self- * clearing, and should clear within a microsecond. */ AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data); ctrl_data |= MASTER_CTRL_OOB_DIS; AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST); AT_WRITE_FLUSH(hw); msleep(10); /* Wait at least 10ms for All module to be Idle */ if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) { dev_err(&pdev->dev, "MAC state machine can't be idle since" " disabled for 10ms second\n"); return -1; } AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data); /* driver control speed/duplex */ AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data); AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW); /* clk switch setting */ AT_READ_REG(hw, REG_SERDES, &ctrl_data); switch (hw->nic_type) { case athr_l2c_b: ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN); AT_WRITE_REG(hw, REG_SERDES, ctrl_data); break; case athr_l2c_b2: case athr_l1d_2: ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN; AT_WRITE_REG(hw, REG_SERDES, ctrl_data); break; default: break; } return 0; } static void atl1c_disable_l0s_l1(struct atl1c_hw *hw) { u16 ctrl_flags = hw->ctrl_flags; hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT); atl1c_set_aspm(hw, SPEED_0); hw->ctrl_flags = ctrl_flags; } /* * Set ASPM state. * Enable/disable L0s/L1 depend on link state. */ static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed) { u32 pm_ctrl_data; u32 link_l1_timer; AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN | PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK); /* L1 timer */ if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S; link_l1_timer = link_speed == SPEED_1000 || link_speed == SPEED_100 ? L1D_PMCTRL_L1_ENTRY_TM_16US : 1; pm_ctrl_data = FIELD_SETX(pm_ctrl_data, L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer); } else { link_l1_timer = hw->nic_type == athr_l2c_b ? 
L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM; if (link_speed != SPEED_1000 && link_speed != SPEED_100) link_l1_timer = 1; pm_ctrl_data = FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, link_l1_timer); } /* L0S/L1 enable */ if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0) pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK; if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK; /* l2cb & l1d & l2cb2 & l1d2 */ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { pm_ctrl_data = FIELD_SETX(pm_ctrl_data, PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF); pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER | PM_CTRL_SERDES_PD_EX_L1 | PM_CTRL_CLK_SWH_L1; pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | PM_CTRL_SERDES_PLL_L1_EN | PM_CTRL_SERDES_BUFS_RX_L1_EN | PM_CTRL_SA_DLY_EN | PM_CTRL_HOTRST); /* disable l0s if link down or l2cb */ if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b) pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; } else { /* l1c */ pm_ctrl_data = FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0); if (link_speed != SPEED_0) { pm_ctrl_data |= PM_CTRL_SERDES_L1_EN | PM_CTRL_SERDES_PLL_L1_EN | PM_CTRL_SERDES_BUFS_RX_L1_EN; pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 | PM_CTRL_CLK_SWH_L1 | PM_CTRL_ASPM_L0S_EN | PM_CTRL_ASPM_L1_EN); } else { /* link down */ pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | PM_CTRL_SERDES_PLL_L1_EN | PM_CTRL_SERDES_BUFS_RX_L1_EN | PM_CTRL_ASPM_L0S_EN); } } AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); return; } /** * atl1c_configure_mac - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * * Configure the Tx /Rx unit of the MAC after a reset. */ static int atl1c_configure_mac(struct atl1c_adapter *adapter) { struct atl1c_hw *hw = &adapter->hw; u32 master_ctrl_data = 0; u32 intr_modrt_data; u32 data; AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN | MASTER_CTRL_INT_RDCLR); /* clear interrupt status */ AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); /* Clear any WOL status */ AT_WRITE_REG(hw, REG_WOL_CTRL, 0); /* set Interrupt Clear Timer * HW will enable self to assert interrupt event to system after * waiting x-time for software to notify it accept interrupt. 
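 * The wait interval used here is hw->ict, written to REG_INT_RETRIG_TIMER below.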
*/ data = CLK_GATING_EN_ALL; if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) { if (hw->nic_type == athr_l2c_b) data &= ~CLK_GATING_RXMAC_EN; } else data = 0; AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data); AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, hw->ict & INT_RETRIG_TIMER_MASK); atl1c_configure_des_ring(adapter); if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) { intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) << IRQ_MODRT_TX_TIMER_SHIFT; intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) << IRQ_MODRT_RX_TIMER_SHIFT; AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data); master_ctrl_data |= MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN; } if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) master_ctrl_data |= MASTER_CTRL_INT_RDCLR; master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer & SMB_STAT_TIMER_MASK); /* set MTU */ AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); atl1c_configure_tx(adapter); atl1c_configure_rx(adapter); atl1c_configure_dma(adapter); return 0; } static int atl1c_configure(struct atl1c_adapter *adapter) { struct net_device *netdev = adapter->netdev; int num; int i; if (adapter->hw.nic_type == athr_mt) { u32 mode; AT_READ_REG(&adapter->hw, REG_MT_MODE, &mode); if (adapter->rx_queue_count == 4) mode |= MT_MODE_4Q; else mode &= ~MT_MODE_4Q; AT_WRITE_REG(&adapter->hw, REG_MT_MODE, mode); } atl1c_init_ring_ptrs(adapter); atl1c_set_multi(netdev); atl1c_restore_vlan(adapter); for (i = 0; i < adapter->rx_queue_count; ++i) { num = atl1c_alloc_rx_buffer(adapter, i, false); if (unlikely(num == 0)) return -ENOMEM; } if (atl1c_configure_mac(adapter)) return -EIO; return 0; } static void atl1c_update_hw_stats(struct atl1c_adapter *adapter) { u16 hw_reg_addr = 0; unsigned long *stats_item = NULL; u32 data; /* update rx status */ hw_reg_addr = REG_MAC_RX_STATUS_BIN; stats_item = &adapter->hw_stats.rx_ok; while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { AT_READ_REG(&adapter->hw, hw_reg_addr, &data); *stats_item += data; stats_item++; hw_reg_addr += 4; } /* update tx status */ hw_reg_addr = REG_MAC_TX_STATUS_BIN; stats_item = &adapter->hw_stats.tx_ok; while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { AT_READ_REG(&adapter->hw, hw_reg_addr, &data); *stats_item += data; stats_item++; hw_reg_addr += 4; } } /** * atl1c_get_stats - Get System Network Statistics * @netdev: network interface device structure * * Returns the address of the device statistics structure. * The statistics are actually updated from the timer callback. 
*/ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw_stats *hw_stats = &adapter->hw_stats; struct net_device_stats *net_stats = &netdev->stats; atl1c_update_hw_stats(adapter); net_stats->rx_bytes = hw_stats->rx_byte_cnt; net_stats->tx_bytes = hw_stats->tx_byte_cnt; net_stats->multicast = hw_stats->rx_mcast; net_stats->collisions = hw_stats->tx_1_col + hw_stats->tx_2_col + hw_stats->tx_late_col + hw_stats->tx_abort_col; net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + hw_stats->rx_len_err + hw_stats->rx_sz_ov + hw_stats->rx_rrd_ov + hw_stats->rx_align_err + hw_stats->rx_rxf_ov; net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; net_stats->rx_length_errors = hw_stats->rx_len_err; net_stats->rx_crc_errors = hw_stats->rx_fcs_err; net_stats->rx_frame_errors = hw_stats->rx_align_err; net_stats->rx_dropped = hw_stats->rx_rrd_ov; net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + hw_stats->tx_underrun + hw_stats->tx_trunc; net_stats->tx_fifo_errors = hw_stats->tx_underrun; net_stats->tx_aborted_errors = hw_stats->tx_abort_col; net_stats->tx_window_errors = hw_stats->tx_late_col; net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; return net_stats; } static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter) { u16 phy_data; spin_lock(&adapter->mdio_lock); atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data); spin_unlock(&adapter->mdio_lock); } static int atl1c_clean_tx(struct napi_struct *napi, int budget) { struct atl1c_tpd_ring *tpd_ring = container_of(napi, struct atl1c_tpd_ring, napi); struct atl1c_adapter *adapter = tpd_ring->adapter; struct netdev_queue *txq = netdev_get_tx_queue(napi->dev, tpd_ring->num); struct atl1c_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); u16 hw_next_to_clean; unsigned int total_bytes = 0, total_packets = 0; unsigned long flags; AT_READ_REGW(&adapter->hw, atl1c_qregs[tpd_ring->num].tpd_cons, &hw_next_to_clean); while (next_to_clean != hw_next_to_clean) { buffer_info = &tpd_ring->buffer_info[next_to_clean]; if (buffer_info->skb) { total_bytes += buffer_info->skb->len; total_packets++; } atl1c_clean_buffer(pdev, buffer_info); if (++next_to_clean == tpd_ring->count) next_to_clean = 0; atomic_set(&tpd_ring->next_to_clean, next_to_clean); } netdev_tx_completed_queue(txq, total_packets, total_bytes); if (netif_tx_queue_stopped(txq) && netif_carrier_ok(adapter->netdev)) netif_tx_wake_queue(txq); if (total_packets < budget) { napi_complete_done(napi, total_packets); spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags); adapter->hw.intr_mask |= atl1c_qregs[tpd_ring->num].tx_isr; AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags); return total_packets; } return budget; } static void atl1c_intr_rx_tx(struct atl1c_adapter *adapter, u32 status) { struct atl1c_hw *hw = &adapter->hw; u32 intr_mask; int i; spin_lock(&hw->intr_mask_lock); intr_mask = hw->intr_mask; for (i = 0; i < adapter->rx_queue_count; ++i) { if (!(status & atl1c_qregs[i].rx_isr)) continue; if (napi_schedule_prep(&adapter->rrd_ring[i].napi)) { intr_mask &= ~atl1c_qregs[i].rx_isr; __napi_schedule(&adapter->rrd_ring[i].napi); } } for (i = 0; i < adapter->tx_queue_count; ++i) { if (!(status & atl1c_qregs[i].tx_isr)) continue; if 
(napi_schedule_prep(&adapter->tpd_ring[i].napi)) { intr_mask &= ~atl1c_qregs[i].tx_isr; __napi_schedule(&adapter->tpd_ring[i].napi); } } if (hw->intr_mask != intr_mask) { hw->intr_mask = intr_mask; AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); } spin_unlock(&hw->intr_mask_lock); } /** * atl1c_intr - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure */ static irqreturn_t atl1c_intr(int irq, void *data) { struct net_device *netdev = data; struct atl1c_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; struct atl1c_hw *hw = &adapter->hw; int max_ints = AT_MAX_INT_WORK; int handled = IRQ_NONE; u32 status; u32 reg_data; do { AT_READ_REG(hw, REG_ISR, &reg_data); status = reg_data & hw->intr_mask; if (status == 0 || (status & ISR_DIS_INT) != 0) { if (max_ints != AT_MAX_INT_WORK) handled = IRQ_HANDLED; break; } /* link event */ if (status & ISR_GPHY) atl1c_clear_phy_int(adapter); /* Ack ISR */ AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); if (status & (ISR_RX_PKT | ISR_TX_PKT)) atl1c_intr_rx_tx(adapter, status); handled = IRQ_HANDLED; /* check if PCIE PHY Link down */ if (status & ISR_ERROR) { if (netif_msg_hw(adapter)) dev_err(&pdev->dev, "atl1c hardware error (status = 0x%x)\n", status & ISR_ERROR); /* reset MAC */ set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); schedule_work(&adapter->common_task); return IRQ_HANDLED; } if (status & ISR_OVER) if (netif_msg_intr(adapter)) dev_warn(&pdev->dev, "TX/RX overflow (status = 0x%x)\n", status & ISR_OVER); /* link event */ if (status & (ISR_GPHY | ISR_MANUAL)) { netdev->stats.tx_carrier_errors++; atl1c_link_chg_event(adapter); break; } } while (--max_ints > 0); /* re-enable Interrupt*/ AT_WRITE_REG(&adapter->hw, REG_ISR, 0); return handled; } static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, struct sk_buff *skb, struct atl1c_recv_ret_status *prrs) { if (adapter->hw.nic_type == athr_mt) { if (prrs->word3 & RRS_MT_PROT_ID_TCPUDP) skb->ip_summed = CHECKSUM_UNNECESSARY; return; } /* * The pid field in RRS in not correct sometimes, so we * cannot figure out if the packet is fragmented or not, * so we tell the KERNEL CHECKSUM_NONE */ skb_checksum_none_assert(skb); } static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter, u32 queue, bool napi_mode) { struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue]; struct sk_buff *skb; struct page *page; if (adapter->rx_frag_size > PAGE_SIZE) { if (likely(napi_mode)) return napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len); else return netdev_alloc_skb_ip_align(adapter->netdev, adapter->rx_buffer_len); } page = rrd_ring->rx_page; if (!page) { page = alloc_page(GFP_ATOMIC); if (unlikely(!page)) return NULL; rrd_ring->rx_page = page; rrd_ring->rx_page_offset = 0; } skb = build_skb(page_address(page) + rrd_ring->rx_page_offset, adapter->rx_frag_size); if (likely(skb)) { skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); rrd_ring->rx_page_offset += adapter->rx_frag_size; if (rrd_ring->rx_page_offset >= PAGE_SIZE) rrd_ring->rx_page = NULL; else get_page(page); } return skb; } static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue, bool napi_mode) { struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue]; struct pci_dev *pdev = adapter->pdev; struct atl1c_buffer *buffer_info, *next_info; struct sk_buff *skb; void *vir_addr = NULL; u16 num_alloc = 0; u16 rfd_next_to_use, next_next; struct atl1c_rx_free_desc *rfd_desc; dma_addr_t mapping; next_next = rfd_next_to_use = 
rfd_ring->next_to_use; if (++next_next == rfd_ring->count) next_next = 0; buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; next_info = &rfd_ring->buffer_info[next_next]; while (next_info->flags & ATL1C_BUFFER_FREE) { rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); skb = atl1c_alloc_skb(adapter, queue, napi_mode); if (unlikely(!skb)) { if (netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, "alloc rx buffer failed\n"); break; } /* * Make buffer alignment 2 beyond a 16 byte boundary * this will result in a 16 byte aligned IP header after * the 14 byte MAC header is removed */ vir_addr = skb->data; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); buffer_info->skb = skb; buffer_info->length = adapter->rx_buffer_len; mapping = dma_map_single(&pdev->dev, vir_addr, buffer_info->length, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { dev_kfree_skb(skb); buffer_info->skb = NULL; buffer_info->length = 0; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed"); break; } buffer_info->dma = mapping; ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, ATL1C_PCIMAP_FROMDEVICE); rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); rfd_next_to_use = next_next; if (++next_next == rfd_ring->count) next_next = 0; buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; next_info = &rfd_ring->buffer_info[next_next]; num_alloc++; } if (num_alloc) { /* TODO: update mailbox here */ wmb(); rfd_ring->next_to_use = rfd_next_to_use; AT_WRITE_REG(&adapter->hw, atl1c_qregs[queue].rfd_prod, rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); } return num_alloc; } static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring, struct atl1c_recv_ret_status *rrs, u16 num) { u16 i; /* the relationship between rrd and rfd is one map one */ for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean)) { rrs->word3 &= ~RRS_RXD_UPDATED; if (++rrd_ring->next_to_clean == rrd_ring->count) rrd_ring->next_to_clean = 0; } } static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring, struct atl1c_recv_ret_status *rrs, u16 num) { u16 i; u16 rfd_index; struct atl1c_buffer *buffer_info = rfd_ring->buffer_info; rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & RRS_RX_RFD_INDEX_MASK; for (i = 0; i < num; i++) { buffer_info[rfd_index].skb = NULL; ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index], ATL1C_BUFFER_FREE); if (++rfd_index == rfd_ring->count) rfd_index = 0; } rfd_ring->next_to_clean = rfd_index; } /** * atl1c_clean_rx - NAPI Rx polling callback * @napi: napi info * @budget: limit of packets to clean */ static int atl1c_clean_rx(struct napi_struct *napi, int budget) { struct atl1c_rrd_ring *rrd_ring = container_of(napi, struct atl1c_rrd_ring, napi); struct atl1c_adapter *adapter = rrd_ring->adapter; u16 rfd_num, rfd_index; u16 length; struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[rrd_ring->num]; struct sk_buff *skb; struct atl1c_recv_ret_status *rrs; struct atl1c_buffer *buffer_info; int work_done = 0; unsigned long flags; /* Keep link state information with original netdev */ if (!netif_carrier_ok(adapter->netdev)) goto quit_polling; while (1) { if (work_done >= budget) break; rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean); if (likely(RRS_RXD_IS_VALID(rrs->word3))) { rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) & RRS_RX_RFD_CNT_MASK; if (unlikely(rfd_num != 1)) /* TODO support mul rfd*/ if 
(netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, "Multi rfd not support yet!\n"); goto rrs_checked; } else { break; } rrs_checked: atl1c_clean_rrd(rrd_ring, rrs, rfd_num); if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) { atl1c_clean_rfd(rfd_ring, rrs, rfd_num); if (netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, "wrong packet! rrs word3 is %x\n", rrs->word3); continue; } length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) & RRS_PKT_SIZE_MASK); /* Good Receive */ if (likely(rfd_num == 1)) { rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & RRS_RX_RFD_INDEX_MASK; buffer_info = &rfd_ring->buffer_info[rfd_index]; dma_unmap_single(&pdev->dev, buffer_info->dma, buffer_info->length, DMA_FROM_DEVICE); skb = buffer_info->skb; } else { /* TODO */ if (netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, "Multi rfd not support yet!\n"); break; } atl1c_clean_rfd(rfd_ring, rrs, rfd_num); skb_put(skb, length - ETH_FCS_LEN); skb->protocol = eth_type_trans(skb, netdev); atl1c_rx_checksum(adapter, skb, rrs); if (rrs->word3 & RRS_VLAN_INS) { u16 vlan; AT_TAG_TO_VLAN(rrs->vlan_tag, vlan); vlan = le16_to_cpu(vlan); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); } napi_gro_receive(napi, skb); work_done++; } if (work_done) atl1c_alloc_rx_buffer(adapter, rrd_ring->num, true); if (work_done < budget) { quit_polling: napi_complete_done(napi, work_done); spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags); adapter->hw.intr_mask |= atl1c_qregs[rrd_ring->num].rx_isr; AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags); } return work_done; } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ static void atl1c_netpoll(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); disable_irq(adapter->pdev->irq); atl1c_intr(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); } #endif static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, u32 queue) { struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue]; u16 next_to_use = 0; u16 next_to_clean = 0; next_to_clean = atomic_read(&tpd_ring->next_to_clean); next_to_use = tpd_ring->next_to_use; return (u16)(next_to_clean > next_to_use) ? 
(next_to_clean - next_to_use - 1) : (tpd_ring->count + next_to_clean - next_to_use - 1); } /* * get next usable tpd * Note: should call atl1c_tdp_avail to make sure * there is enough tpd to use */ static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter, u32 queue) { struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue]; struct atl1c_tpd_desc *tpd_desc; u16 next_to_use = 0; next_to_use = tpd_ring->next_to_use; if (++tpd_ring->next_to_use == tpd_ring->count) tpd_ring->next_to_use = 0; tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use); memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc)); return tpd_desc; } static struct atl1c_buffer * atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd) { struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; return &tpd_ring->buffer_info[tpd - (struct atl1c_tpd_desc *)tpd_ring->desc]; } /* Calculate the transmit packet descript needed*/ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb) { u16 tpd_req; u16 proto_hdr_len = 0; tpd_req = skb_shinfo(skb)->nr_frags + 1; if (skb_is_gso(skb)) { proto_hdr_len = skb_tcp_all_headers(skb); if (proto_hdr_len < skb_headlen(skb)) tpd_req++; if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) tpd_req++; } return tpd_req; } static int atl1c_tso_csum(struct atl1c_adapter *adapter, struct sk_buff *skb, struct atl1c_tpd_desc **tpd, u32 queue) { struct pci_dev *pdev = adapter->pdev; unsigned short offload_type; u8 hdr_len; u32 real_len; if (skb_is_gso(skb)) { int err; err = skb_cow_head(skb, 0); if (err < 0) return err; offload_type = skb_shinfo(skb)->gso_type; if (offload_type & SKB_GSO_TCPV4) { real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + ntohs(ip_hdr(skb)->tot_len)); if (real_len < skb->len) { err = pskb_trim(skb, real_len); if (err) return err; } hdr_len = skb_tcp_all_headers(skb); if (unlikely(skb->len == hdr_len)) { /* only xsum need */ if (netif_msg_tx_queued(adapter)) dev_warn(&pdev->dev, "IPV4 tso with zero data??\n"); goto check_sum; } else { ip_hdr(skb)->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic( ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT; } } if (offload_type & SKB_GSO_TCPV6) { struct atl1c_tpd_ext_desc *etpd = *(struct atl1c_tpd_ext_desc **)(tpd); memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc)); *tpd = atl1c_get_tpd(adapter, queue); ipv6_hdr(skb)->payload_len = 0; /* check payload == 0 byte ? 
*/ hdr_len = skb_tcp_all_headers(skb); if (unlikely(skb->len == hdr_len)) { /* only xsum need */ if (netif_msg_tx_queued(adapter)) dev_warn(&pdev->dev, "IPV6 tso with zero data??\n"); goto check_sum; } else tcp_v6_gso_csum_prep(skb); etpd->word1 |= 1 << TPD_LSO_EN_SHIFT; etpd->word1 |= 1 << TPD_LSO_VER_SHIFT; etpd->pkt_len = cpu_to_le32(skb->len); (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT; } (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT; (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) << TPD_TCPHDR_OFFSET_SHIFT; (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) << TPD_MSS_SHIFT; return 0; } check_sum: if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { u8 css, cso; cso = skb_checksum_start_offset(skb); if (unlikely(cso & 0x1)) { if (netif_msg_tx_err(adapter)) dev_err(&adapter->pdev->dev, "payload offset should not an event number\n"); return -1; } else { css = cso + skb->csum_offset; (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) << TPD_PLOADOFFSET_SHIFT; (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) << TPD_CCSUM_OFFSET_SHIFT; (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT; } } return 0; } static void atl1c_tx_rollback(struct atl1c_adapter *adpt, struct atl1c_tpd_desc *first_tpd, u32 queue) { struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[queue]; struct atl1c_buffer *buffer_info; struct atl1c_tpd_desc *tpd; u16 first_index, index; first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc; index = first_index; while (index != tpd_ring->next_to_use) { tpd = ATL1C_TPD_DESC(tpd_ring, index); buffer_info = &tpd_ring->buffer_info[index]; atl1c_clean_buffer(adpt->pdev, buffer_info); memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); if (++index == tpd_ring->count) index = 0; } tpd_ring->next_to_use = first_index; } static int atl1c_tx_map(struct atl1c_adapter *adapter, struct sk_buff *skb, struct atl1c_tpd_desc *tpd, u32 queue) { struct atl1c_tpd_desc *use_tpd = NULL; struct atl1c_buffer *buffer_info = NULL; u16 buf_len = skb_headlen(skb); u16 map_len = 0; u16 mapped_len = 0; u16 hdr_len = 0; u16 nr_frags; u16 f; int tso; nr_frags = skb_shinfo(skb)->nr_frags; tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK; if (tso) { /* TSO */ hdr_len = skb_tcp_all_headers(skb); map_len = hdr_len; use_tpd = tpd; buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); buffer_info->length = map_len; buffer_info->dma = dma_map_single(&adapter->pdev->dev, skb->data, hdr_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))) goto err_dma; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, ATL1C_PCIMAP_TODEVICE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); use_tpd->buffer_len = cpu_to_le16(buffer_info->length); } if (mapped_len < buf_len) { /* mapped_len == 0, means we should use the first tpd, which is given by caller */ if (mapped_len == 0) use_tpd = tpd; else { use_tpd = atl1c_get_tpd(adapter, queue); memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); } buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); buffer_info->length = buf_len - mapped_len; buffer_info->dma = dma_map_single(&adapter->pdev->dev, skb->data + mapped_len, buffer_info->length, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))) goto err_dma; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, ATL1C_PCIMAP_TODEVICE); use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 
use_tpd->buffer_len = cpu_to_le16(buffer_info->length); } for (f = 0; f < nr_frags; f++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; use_tpd = atl1c_get_tpd(adapter, queue); memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); buffer_info->length = skb_frag_size(frag); buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, frag, 0, buffer_info->length, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) goto err_dma; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, ATL1C_PCIMAP_TODEVICE); use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); use_tpd->buffer_len = cpu_to_le16(buffer_info->length); } /* The last tpd */ use_tpd->word1 |= 1 << TPD_EOP_SHIFT; /* The last buffer info contain the skb address, so it will be free after unmap */ buffer_info->skb = skb; return 0; err_dma: buffer_info->dma = 0; buffer_info->length = 0; return -1; } static void atl1c_tx_queue(struct atl1c_adapter *adapter, u32 queue) { struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue]; AT_WRITE_REGW(&adapter->hw, atl1c_qregs[queue].tpd_prod, tpd_ring->next_to_use); } static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); u32 queue = skb_get_queue_mapping(skb); struct netdev_queue *txq = netdev_get_tx_queue(netdev, queue); struct atl1c_tpd_desc *tpd; u16 tpd_req; if (test_bit(__AT_DOWN, &adapter->flags)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } tpd_req = atl1c_cal_tpd_req(skb); if (atl1c_tpd_avail(adapter, queue) < tpd_req) { /* no enough descriptor, just stop queue */ atl1c_tx_queue(adapter, queue); netif_tx_stop_queue(txq); return NETDEV_TX_BUSY; } tpd = atl1c_get_tpd(adapter, queue); /* do TSO and check sum */ if (atl1c_tso_csum(adapter, skb, &tpd, queue) != 0) { atl1c_tx_queue(adapter, queue); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (unlikely(skb_vlan_tag_present(skb))) { u16 vlan = skb_vlan_tag_get(skb); __le16 tag; vlan = cpu_to_le16(vlan); AT_VLAN_TO_TAG(vlan, tag); tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT; tpd->vlan_tag = tag; } if (skb_network_offset(skb) != ETH_HLEN) tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ if (atl1c_tx_map(adapter, skb, tpd, queue) < 0) { netif_info(adapter, tx_done, adapter->netdev, "tx-skb dropped due to dma error\n"); /* roll back tpd/buffer */ atl1c_tx_rollback(adapter, tpd, queue); dev_kfree_skb_any(skb); } else { bool more = netdev_xmit_more(); if (__netdev_tx_sent_queue(txq, skb->len, more)) atl1c_tx_queue(adapter, queue); } return NETDEV_TX_OK; } static void atl1c_free_irq(struct atl1c_adapter *adapter) { struct net_device *netdev = adapter->netdev; free_irq(adapter->pdev->irq, netdev); if (adapter->have_msi) pci_disable_msi(adapter->pdev); } static int atl1c_request_irq(struct atl1c_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; int flags = 0; int err = 0; adapter->have_msi = true; err = pci_enable_msi(adapter->pdev); if (err) { if (netif_msg_ifup(adapter)) dev_err(&pdev->dev, "Unable to allocate MSI interrupt Error: %d\n", err); adapter->have_msi = false; } if (!adapter->have_msi) flags |= IRQF_SHARED; err = request_irq(adapter->pdev->irq, atl1c_intr, flags, netdev->name, netdev); if (err) { if (netif_msg_ifup(adapter)) dev_err(&pdev->dev, "Unable to allocate interrupt Error: %d\n", err); if (adapter->have_msi) pci_disable_msi(adapter->pdev); return err; } 
if (netif_msg_ifup(adapter)) dev_dbg(&pdev->dev, "atl1c_request_irq OK\n"); return err; } static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter) { int i; /* release tx-pending skbs and reset tx/rx ring index */ for (i = 0; i < adapter->tx_queue_count; ++i) atl1c_clean_tx_ring(adapter, i); for (i = 0; i < adapter->rx_queue_count; ++i) atl1c_clean_rx_ring(adapter, i); } static int atl1c_up(struct atl1c_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; int i; netif_carrier_off(netdev); err = atl1c_configure(adapter); if (unlikely(err)) goto err_up; err = atl1c_request_irq(adapter); if (unlikely(err)) goto err_up; atl1c_check_link_status(adapter); clear_bit(__AT_DOWN, &adapter->flags); for (i = 0; i < adapter->tx_queue_count; ++i) napi_enable(&adapter->tpd_ring[i].napi); for (i = 0; i < adapter->rx_queue_count; ++i) napi_enable(&adapter->rrd_ring[i].napi); atl1c_irq_enable(adapter); netif_start_queue(netdev); return err; err_up: for (i = 0; i < adapter->rx_queue_count; ++i) atl1c_clean_rx_ring(adapter, i); return err; } static void atl1c_down(struct atl1c_adapter *adapter) { struct net_device *netdev = adapter->netdev; int i; atl1c_del_timer(adapter); adapter->work_event = 0; /* clear all event */ /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__AT_DOWN, &adapter->flags); netif_carrier_off(netdev); for (i = 0; i < adapter->tx_queue_count; ++i) napi_disable(&adapter->tpd_ring[i].napi); for (i = 0; i < adapter->rx_queue_count; ++i) napi_disable(&adapter->rrd_ring[i].napi); atl1c_irq_disable(adapter); atl1c_free_irq(adapter); /* disable ASPM if device inactive */ atl1c_disable_l0s_l1(&adapter->hw); /* reset MAC to disable all RX/TX */ atl1c_reset_mac(&adapter->hw); msleep(1); adapter->link_speed = SPEED_0; adapter->link_duplex = -1; atl1c_reset_dma_ring(adapter); } /** * atl1c_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. */ static int atl1c_open(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); int err; /* disallow open during test */ if (test_bit(__AT_TESTING, &adapter->flags)) return -EBUSY; /* allocate rx/tx dma buffer & descriptors */ err = atl1c_setup_ring_resources(adapter); if (unlikely(err)) return err; err = atl1c_up(adapter); if (unlikely(err)) goto err_up; return 0; err_up: atl1c_free_irq(adapter); atl1c_free_ring_resources(adapter); atl1c_reset_mac(&adapter->hw); return err; } /** * atl1c_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. 
*/ static int atl1c_close(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); set_bit(__AT_DOWN, &adapter->flags); cancel_work_sync(&adapter->common_task); atl1c_down(adapter); atl1c_free_ring_resources(adapter); return 0; } static int atl1c_suspend(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; u32 wufc = adapter->wol; atl1c_disable_l0s_l1(hw); if (netif_running(netdev)) { WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); atl1c_down(adapter); } netif_device_detach(netdev); if (wufc) if (atl1c_phy_to_ps_link(hw) != 0) dev_dbg(dev, "phy power saving failed"); atl1c_power_saving(hw, wufc); return 0; } #ifdef CONFIG_PM_SLEEP static int atl1c_resume(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct atl1c_adapter *adapter = netdev_priv(netdev); AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); atl1c_phy_reset(&adapter->hw); atl1c_reset_mac(&adapter->hw); atl1c_phy_init(&adapter->hw); netif_device_attach(netdev); if (netif_running(netdev)) atl1c_up(adapter); return 0; } #endif static void atl1c_shutdown(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1c_adapter *adapter = netdev_priv(netdev); atl1c_suspend(&pdev->dev); pci_wake_from_d3(pdev, adapter->wol); pci_set_power_state(pdev, PCI_D3hot); } static const struct net_device_ops atl1c_netdev_ops = { .ndo_open = atl1c_open, .ndo_stop = atl1c_close, .ndo_validate_addr = eth_validate_addr, .ndo_start_xmit = atl1c_xmit_frame, .ndo_set_mac_address = atl1c_set_mac_addr, .ndo_set_rx_mode = atl1c_set_multi, .ndo_change_mtu = atl1c_change_mtu, .ndo_fix_features = atl1c_fix_features, .ndo_set_features = atl1c_set_features, .ndo_eth_ioctl = atl1c_ioctl, .ndo_tx_timeout = atl1c_tx_timeout, .ndo_get_stats = atl1c_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl1c_netpoll, #endif }; static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev) { SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); netdev->netdev_ops = &atl1c_netdev_ops; netdev->watchdog_timeo = AT_TX_WATCHDOG; netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN); atl1c_set_ethtool_ops(netdev); /* TODO: add when ready */ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX; return 0; } /** * atl1c_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl1c_pci_tbl * * Returns 0 on success, negative on failure * * atl1c_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. */ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl1c_adapter *adapter; static int cards_found; u8 __iomem *hw_addr; enum atl1c_nic_type nic_type; u32 queue_count = 1; int err = 0; int i; /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ err = pci_enable_device_mem(pdev); if (err) return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * The atl1c chip can DMA to 64-bit addresses, but it uses a single * shared register for the high 32 bits, so only a single, aligned, * 4 GB physical address range can be used at a time. * * Supporting 64-bit DMA on this hardware is more trouble than it's * worth. It is far easier to limit to 32-bit DMA than update * various kernel subsystems to support the mechanics required by a * fixed-high-32-bit system. */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); goto err_dma; } err = pci_request_regions(pdev, atl1c_driver_name); if (err) { dev_err(&pdev->dev, "cannot obtain PCI resources\n"); goto err_pci_reg; } pci_set_master(pdev); hw_addr = pci_ioremap_bar(pdev, 0); if (!hw_addr) { err = -EIO; dev_err(&pdev->dev, "cannot map device registers\n"); goto err_ioremap; } nic_type = atl1c_get_mac_type(pdev, hw_addr); if (nic_type == athr_mt) queue_count = 4; netdev = alloc_etherdev_mq(sizeof(struct atl1c_adapter), queue_count); if (netdev == NULL) { err = -ENOMEM; goto err_alloc_etherdev; } err = atl1c_init_netdev(netdev, pdev); if (err) { dev_err(&pdev->dev, "init netdevice failed\n"); goto err_init_netdev; } adapter = netdev_priv(netdev); adapter->bd_number = cards_found; adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.adapter = adapter; adapter->hw.nic_type = nic_type; adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg); adapter->hw.hw_addr = hw_addr; adapter->tx_queue_count = queue_count; adapter->rx_queue_count = queue_count; /* init mii data */ adapter->mii.dev = netdev; adapter->mii.mdio_read = atl1c_mdio_read; adapter->mii.mdio_write = atl1c_mdio_write; adapter->mii.phy_id_mask = 0x1f; adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK; dev_set_threaded(netdev, true); for (i = 0; i < adapter->rx_queue_count; ++i) netif_napi_add(netdev, &adapter->rrd_ring[i].napi, atl1c_clean_rx); for (i = 0; i < adapter->tx_queue_count; ++i) netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi, atl1c_clean_tx); timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0); /* setup the private structure */ err = atl1c_sw_init(adapter); if (err) { dev_err(&pdev->dev, "net device private data init failed\n"); goto err_sw_init; } /* set max MTU */ atl1c_set_max_mtu(netdev); atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); /* Init GPHY as early as possible due to power saving issue */ atl1c_phy_reset(&adapter->hw); err = atl1c_reset_mac(&adapter->hw); if (err) { err = -EIO; goto err_reset; } /* reset the controller to * put the device in a known good starting state */ err = atl1c_phy_init(&adapter->hw); if (err) { err = -EIO; goto err_reset; } if (atl1c_read_mac_addr(&adapter->hw)) { /* got a random MAC address, set NET_ADDR_RANDOM to netdev */ netdev->addr_assign_type = NET_ADDR_RANDOM; } eth_hw_addr_set(netdev, adapter->hw.mac_addr); if (netif_msg_probe(adapter)) dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr); atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); INIT_WORK(&adapter->common_task, atl1c_common_task); adapter->work_event = 0; err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "register netdevice failed\n"); goto err_register; } cards_found++; return 0; err_reset: err_register: err_sw_init: err_init_netdev: free_netdev(netdev); err_alloc_etherdev: iounmap(hw_addr); err_ioremap: 
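	/* each error label falls through, undoing probe setup in reverse order */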
pci_release_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * atl1c_remove - Device Removal Routine * @pdev: PCI device information struct * * atl1c_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. */ static void atl1c_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1c_adapter *adapter = netdev_priv(netdev); unregister_netdev(netdev); /* restore permanent address */ atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr); atl1c_phy_disable(&adapter->hw); iounmap(adapter->hw.hw_addr); pci_release_regions(pdev); pci_disable_device(pdev); free_netdev(netdev); } /** * atl1c_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. */ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1c_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (netif_running(netdev)) atl1c_down(adapter); pci_disable_device(pdev); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * atl1c_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the e1000_resume routine. */ static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1c_adapter *adapter = netdev_priv(netdev); if (pci_enable_device(pdev)) { if (netif_msg_hw(adapter)) dev_err(&pdev->dev, "Cannot re-enable PCI device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); atl1c_reset_mac(&adapter->hw); return PCI_ERS_RESULT_RECOVERED; } /** * atl1c_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the atl1c_resume routine. */ static void atl1c_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1c_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) { if (atl1c_up(adapter)) { if (netif_msg_hw(adapter)) dev_err(&pdev->dev, "Cannot bring device back up after reset\n"); return; } } netif_device_attach(netdev); } static const struct pci_error_handlers atl1c_err_handler = { .error_detected = atl1c_io_error_detected, .slot_reset = atl1c_io_slot_reset, .resume = atl1c_io_resume, }; static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume); static struct pci_driver atl1c_driver = { .name = atl1c_driver_name, .id_table = atl1c_pci_tbl, .probe = atl1c_probe, .remove = atl1c_remove, .shutdown = atl1c_shutdown, .err_handler = &atl1c_err_handler, .driver.pm = &atl1c_pm_ops, }; module_pci_driver(atl1c_driver);
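/*
 * Illustrative sketch: module_pci_driver() above is a convenience macro that
 * generates module init/exit boilerplate roughly equivalent to the open-coded
 * pair below. Function names here are for illustration only; the block is
 * guarded with #if 0 so it is never compiled.
 */
#if 0
static int __init atl1c_init_module(void)
{
	return pci_register_driver(&atl1c_driver);
}
module_init(atl1c_init_module);

static void __exit atl1c_exit_module(void)
{
	pci_unregister_driver(&atl1c_driver);
}
module_exit(atl1c_exit_module);
#endif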
linux-master
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/mii.h> #include <linux/crc32.h> #include "atl1c.h" /* * check_eeprom_exist * return 1 if eeprom exist */ int atl1c_check_eeprom_exist(struct atl1c_hw *hw) { u32 data; AT_READ_REG(hw, REG_TWSI_DEBUG, &data); if (data & TWSI_DEBUG_DEV_EXIST) return 1; AT_READ_REG(hw, REG_MASTER_CTRL, &data); if (data & MASTER_CTRL_OTP_SEL) return 1; return 0; } void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr) { u32 value; /* * 00-0B-6A-F6-00-DC * 0: 6AF600DC 1: 000B * low dword */ value = mac_addr[2] << 24 | mac_addr[3] << 16 | mac_addr[4] << 8 | mac_addr[5]; AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); /* hight dword */ value = mac_addr[0] << 8 | mac_addr[1]; AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); } /* read mac address from hardware register */ static bool atl1c_read_current_addr(struct atl1c_hw *hw, u8 *eth_addr) { u32 addr[2]; AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]); AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]); *(u32 *) &eth_addr[2] = htonl(addr[0]); *(u16 *) &eth_addr[0] = htons((u16)addr[1]); return is_valid_ether_addr(eth_addr); } /* * atl1c_get_permanent_address * return 0 if get valid mac address, */ static int atl1c_get_permanent_address(struct atl1c_hw *hw) { u32 i; u32 otp_ctrl_data; u32 twsi_ctrl_data; u16 phy_data; bool raise_vol = false; /* MAC-address from BIOS is the 1st priority */ if (atl1c_read_current_addr(hw, hw->perm_mac_addr)) return 0; /* init */ AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); if (atl1c_check_eeprom_exist(hw)) { if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { /* Enable OTP CLK */ if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { otp_ctrl_data |= OTP_CTRL_CLK_EN; AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); AT_WRITE_FLUSH(hw); msleep(1); } } /* raise voltage temporally for l2cb */ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data); phy_data &= ~ANACTRL_HB_EN; atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data); atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); phy_data |= VOLT_CTRL_SWLOWEST; atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); udelay(20); raise_vol = true; } AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { msleep(10); AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) break; } if (i >= AT_TWSI_EEPROM_TIMEOUT) return -1; } /* Disable OTP_CLK */ if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) { otp_ctrl_data &= ~OTP_CTRL_CLK_EN; AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); msleep(1); } if (raise_vol) { atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data); phy_data |= ANACTRL_HB_EN; atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data); atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); phy_data &= ~VOLT_CTRL_SWLOWEST; atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); udelay(20); } if (atl1c_read_current_addr(hw, hw->perm_mac_addr)) return 0; return -1; } bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value) { int i; bool ret = false; u32 otp_ctrl_data; u32 control; u32 data; if (offset & 3) return ret; /* address do not align */ AT_READ_REG(hw, REG_OTP_CTRL, 
&otp_ctrl_data); if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) AT_WRITE_REG(hw, REG_OTP_CTRL, (otp_ctrl_data | OTP_CTRL_CLK_EN)); AT_WRITE_REG(hw, REG_EEPROM_DATA_LO, 0); control = (offset & EEPROM_CTRL_ADDR_MASK) << EEPROM_CTRL_ADDR_SHIFT; AT_WRITE_REG(hw, REG_EEPROM_CTRL, control); for (i = 0; i < 10; i++) { udelay(100); AT_READ_REG(hw, REG_EEPROM_CTRL, &control); if (control & EEPROM_CTRL_RW) break; } if (control & EEPROM_CTRL_RW) { AT_READ_REG(hw, REG_EEPROM_CTRL, &data); AT_READ_REG(hw, REG_EEPROM_DATA_LO, p_value); data = data & 0xFFFF; *p_value = swab32((data << 16) | (*p_value >> 16)); ret = true; } if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); return ret; } /* * Reads the adapter's MAC address from the EEPROM * * hw - Struct containing variables accessed by shared code */ int atl1c_read_mac_addr(struct atl1c_hw *hw) { int err = 0; err = atl1c_get_permanent_address(hw); if (err) eth_random_addr(hw->perm_mac_addr); memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); return err; } /* * atl1c_hash_mc_addr * purpose * set hash value for a multicast address * hash calcu processing : * 1. calcu 32bit CRC for multicast address * 2. reverse crc with MSB to LSB */ u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr) { u32 crc32; u32 value = 0; int i; crc32 = ether_crc_le(6, mc_addr); for (i = 0; i < 32; i++) value |= (((crc32 >> i) & 1) << (31 - i)); return value; } /* * Sets the bit in the multicast table corresponding to the hash value. * hw - Struct containing variables accessed by shared code * hash_value - Multicast address hash value */ void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value) { u32 hash_bit, hash_reg; u32 mta; /* * The HASH Table is a register array of 2 32-bit registers. * It is treated like an array of 64 bits. We want to set * bit BitArray[hash_value]. So we figure out what register * the bit is in, read it, OR in the new bit, then write * back the new value. The register is determined by the * upper bit of the hash value and the bit within that * register are determined by the lower 5 bits of the value. */ hash_reg = (hash_value >> 31) & 0x1; hash_bit = (hash_value >> 26) & 0x1F; mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); mta |= (1 << hash_bit); AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); } /* * wait mdio module be idle * return true: idle * false: still busy */ bool atl1c_wait_mdio_idle(struct atl1c_hw *hw) { u32 val; int i; for (i = 0; i < MDIO_MAX_AC_TO; i++) { AT_READ_REG(hw, REG_MDIO_CTRL, &val); if (!(val & (MDIO_CTRL_BUSY | MDIO_CTRL_START))) break; udelay(10); } return i != MDIO_MAX_AC_TO; } void atl1c_stop_phy_polling(struct atl1c_hw *hw) { if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) return; AT_WRITE_REG(hw, REG_MDIO_CTRL, 0); atl1c_wait_mdio_idle(hw); } void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel) { u32 val; if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) return; val = MDIO_CTRL_SPRES_PRMBL | FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | FIELDX(MDIO_CTRL_REG, 1) | MDIO_CTRL_START | MDIO_CTRL_OP_READ; AT_WRITE_REG(hw, REG_MDIO_CTRL, val); atl1c_wait_mdio_idle(hw); val |= MDIO_CTRL_AP_EN; val &= ~MDIO_CTRL_START; AT_WRITE_REG(hw, REG_MDIO_CTRL, val); udelay(30); } /* * atl1c_read_phy_core * core function to read register in PHY via MDIO control register. 
* ext: extension register (see IEEE 802.3) * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) * reg: reg to read */ int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, u16 reg, u16 *phy_data) { u32 val; u16 clk_sel = MDIO_CTRL_CLK_25_4; atl1c_stop_phy_polling(hw); *phy_data = 0; /* only l2c_b2 & l1d_2 could use slow clock */ if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) && hw->hibernate) clk_sel = MDIO_CTRL_CLK_25_128; if (ext) { val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg); AT_WRITE_REG(hw, REG_MDIO_EXTN, val); val = MDIO_CTRL_SPRES_PRMBL | FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | MDIO_CTRL_START | MDIO_CTRL_MODE_EXT | MDIO_CTRL_OP_READ; } else { val = MDIO_CTRL_SPRES_PRMBL | FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | FIELDX(MDIO_CTRL_REG, reg) | MDIO_CTRL_START | MDIO_CTRL_OP_READ; } AT_WRITE_REG(hw, REG_MDIO_CTRL, val); if (!atl1c_wait_mdio_idle(hw)) return -1; AT_READ_REG(hw, REG_MDIO_CTRL, &val); *phy_data = (u16)FIELD_GETX(val, MDIO_CTRL_DATA); atl1c_start_phy_polling(hw, clk_sel); return 0; } /* * atl1c_write_phy_core * core function to write to register in PHY via MDIO control register. * ext: extension register (see IEEE 802.3) * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) * reg: reg to write */ int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, u16 reg, u16 phy_data) { u32 val; u16 clk_sel = MDIO_CTRL_CLK_25_4; atl1c_stop_phy_polling(hw); /* only l2c_b2 & l1d_2 could use slow clock */ if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) && hw->hibernate) clk_sel = MDIO_CTRL_CLK_25_128; if (ext) { val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg); AT_WRITE_REG(hw, REG_MDIO_EXTN, val); val = MDIO_CTRL_SPRES_PRMBL | FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | FIELDX(MDIO_CTRL_DATA, phy_data) | MDIO_CTRL_START | MDIO_CTRL_MODE_EXT; } else { val = MDIO_CTRL_SPRES_PRMBL | FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | FIELDX(MDIO_CTRL_DATA, phy_data) | FIELDX(MDIO_CTRL_REG, reg) | MDIO_CTRL_START; } AT_WRITE_REG(hw, REG_MDIO_CTRL, val); if (!atl1c_wait_mdio_idle(hw)) return -1; atl1c_start_phy_polling(hw, clk_sel); return 0; } /* * Reads the value from a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to read */ int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) { return atl1c_read_phy_core(hw, false, 0, reg_addr, phy_data); } /* * Writes a value to a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to write * data - data to write to the PHY */ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) { return atl1c_write_phy_core(hw, false, 0, reg_addr, phy_data); } /* read from PHY extension register */ int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr, u16 reg_addr, u16 *phy_data) { return atl1c_read_phy_core(hw, true, dev_addr, reg_addr, phy_data); } /* write to PHY extension register */ int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr, u16 reg_addr, u16 phy_data) { return atl1c_write_phy_core(hw, true, dev_addr, reg_addr, phy_data); } int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) { int err; err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); if (unlikely(err)) return err; else err = atl1c_read_phy_reg(hw, MII_DBG_DATA, phy_data); return err; } int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data) { int err; err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); if 
(unlikely(err)) return err; else err = atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); return err; } /* * Configures PHY autoneg and flow control advertisement settings * * hw - Struct containing variables accessed by shared code */ static int atl1c_phy_setup_adv(struct atl1c_hw *hw) { u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL; u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP & ~GIGA_CR_1000T_SPEED_MASK; if (hw->autoneg_advertised & ADVERTISED_10baseT_Half) mii_adv_data |= ADVERTISE_10HALF; if (hw->autoneg_advertised & ADVERTISED_10baseT_Full) mii_adv_data |= ADVERTISE_10FULL; if (hw->autoneg_advertised & ADVERTISED_100baseT_Half) mii_adv_data |= ADVERTISE_100HALF; if (hw->autoneg_advertised & ADVERTISED_100baseT_Full) mii_adv_data |= ADVERTISE_100FULL; if (hw->autoneg_advertised & ADVERTISED_Autoneg) mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL; if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) { if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half) mii_giga_ctrl_data |= ADVERTISE_1000HALF; if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full) mii_giga_ctrl_data |= ADVERTISE_1000FULL; if (hw->autoneg_advertised & ADVERTISED_Autoneg) mii_giga_ctrl_data |= ADVERTISE_1000HALF | ADVERTISE_1000FULL; } if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 || atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0) return -1; return 0; } void atl1c_phy_disable(struct atl1c_hw *hw) { atl1c_power_saving(hw, 0); } int atl1c_phy_reset(struct atl1c_hw *hw) { struct atl1c_adapter *adapter = hw->adapter; struct pci_dev *pdev = adapter->pdev; u16 phy_data; u32 phy_ctrl_data, lpi_ctrl; int err; /* reset PHY core */ AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl_data); phy_ctrl_data &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_GATE_25M_EN | GPHY_CTRL_PWDOWN_HW | GPHY_CTRL_CLS); phy_ctrl_data |= GPHY_CTRL_SEL_ANA_RST; if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) phy_ctrl_data |= (GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); else phy_ctrl_data &= ~(GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); AT_WRITE_FLUSH(hw); udelay(10); AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data | GPHY_CTRL_EXT_RESET); AT_WRITE_FLUSH(hw); udelay(10 * GPHY_CTRL_EXT_RST_TO); /* delay 800us */ /* switch clock */ if (hw->nic_type == athr_l2c_b) { atl1c_read_phy_dbg(hw, MIIDBG_CFGLPSPD, &phy_data); atl1c_write_phy_dbg(hw, MIIDBG_CFGLPSPD, phy_data & ~CFGLPSPD_RSTCNT_CLK125SW); } /* tx-half amplitude issue fix */ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { atl1c_read_phy_dbg(hw, MIIDBG_CABLE1TH_DET, &phy_data); phy_data |= CABLE1TH_DET_EN; atl1c_write_phy_dbg(hw, MIIDBG_CABLE1TH_DET, phy_data); } /* clear bit3 of dbgport 3B to lower voltage */ if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) { if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); phy_data &= ~VOLT_CTRL_SWLOWEST; atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); } /* power saving config */ phy_data = hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ? 
L1D_LEGCYPS_DEF : L1C_LEGCYPS_DEF; atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, phy_data); /* hib */ atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, SYSMODCTRL_IECHOADJ_DEF); } else { /* disable pws */ atl1c_read_phy_dbg(hw, MIIDBG_LEGCYPS, &phy_data); atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, phy_data & ~LEGCYPS_EN); /* disable hibernate */ atl1c_read_phy_dbg(hw, MIIDBG_HIBNEG, &phy_data); atl1c_write_phy_dbg(hw, MIIDBG_HIBNEG, phy_data & HIBNEG_PSHIB_EN); } /* disable AZ(EEE) by default */ if (hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 || hw->nic_type == athr_l2c_b2) { AT_READ_REG(hw, REG_LPI_CTRL, &lpi_ctrl); AT_WRITE_REG(hw, REG_LPI_CTRL, lpi_ctrl & ~LPI_CTRL_EN); atl1c_write_phy_ext(hw, MIIEXT_ANEG, MIIEXT_LOCAL_EEEADV, 0); atl1c_write_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL3, L2CB_CLDCTRL3); } /* other debug port to set */ atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, ANACTRL_DEF); atl1c_write_phy_dbg(hw, MIIDBG_SRDSYSMOD, SRDSYSMOD_DEF); atl1c_write_phy_dbg(hw, MIIDBG_TST10BTCFG, TST10BTCFG_DEF); /* UNH-IOL test issue, set bit7 */ atl1c_write_phy_dbg(hw, MIIDBG_TST100BTCFG, TST100BTCFG_DEF | TST100BTCFG_LITCH_EN); /* set phy interrupt mask */ phy_data = IER_LINK_UP | IER_LINK_DOWN; err = atl1c_write_phy_reg(hw, MII_IER, phy_data); if (err) { if (netif_msg_hw(adapter)) dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n"); return err; } return 0; } int atl1c_phy_init(struct atl1c_hw *hw) { struct atl1c_adapter *adapter = hw->adapter; struct pci_dev *pdev = adapter->pdev; int ret_val; u16 mii_bmcr_data = BMCR_RESET; if (hw->nic_type == athr_mt) { hw->phy_configured = true; return 0; } if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) || (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) { dev_err(&pdev->dev, "Error get phy ID\n"); return -1; } switch (hw->media_type) { case MEDIA_TYPE_AUTO_SENSOR: ret_val = atl1c_phy_setup_adv(hw); if (ret_val) { if (netif_msg_link(adapter)) dev_err(&pdev->dev, "Error Setting up Auto-Negotiation\n"); return ret_val; } mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; break; case MEDIA_TYPE_100M_FULL: mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX; break; case MEDIA_TYPE_100M_HALF: mii_bmcr_data |= BMCR_SPEED100; break; case MEDIA_TYPE_10M_FULL: mii_bmcr_data |= BMCR_FULLDPLX; break; case MEDIA_TYPE_10M_HALF: break; default: if (netif_msg_link(adapter)) dev_err(&pdev->dev, "Wrong Media type %d\n", hw->media_type); return -1; } ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); if (ret_val) return ret_val; hw->phy_configured = true; return 0; } bool atl1c_get_link_status(struct atl1c_hw *hw) { u16 phy_data; if (hw->nic_type == athr_mt) { u32 spd; AT_READ_REG(hw, REG_MT_SPEED, &spd); return !!spd; } /* MII_BMSR must be read twice */ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); return !!(phy_data & BMSR_LSTATUS); } /* * Detects the current speed and duplex settings of the hardware. 
* * hw - Struct containing variables accessed by shared code * speed - Speed of the connection * duplex - Duplex setting of the connection */ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex) { int err; u16 phy_data; if (hw->nic_type == athr_mt) { u32 spd; AT_READ_REG(hw, REG_MT_SPEED, &spd); *speed = spd; *duplex = FULL_DUPLEX; return 0; } /* Read PHY Specific Status Register (17) */ err = atl1c_read_phy_reg(hw, MII_GIGA_PSSR, &phy_data); if (err) return err; if (!(phy_data & GIGA_PSSR_SPD_DPLX_RESOLVED)) return -1; switch (phy_data & GIGA_PSSR_SPEED) { case GIGA_PSSR_1000MBS: *speed = SPEED_1000; break; case GIGA_PSSR_100MBS: *speed = SPEED_100; break; case GIGA_PSSR_10MBS: *speed = SPEED_10; break; default: return -1; } if (phy_data & GIGA_PSSR_DPLX) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; return 0; } /* select one link mode to get lower power consumption */ int atl1c_phy_to_ps_link(struct atl1c_hw *hw) { struct atl1c_adapter *adapter = hw->adapter; struct pci_dev *pdev = adapter->pdev; int ret = 0; u16 autoneg_advertised = ADVERTISED_10baseT_Half; u16 save_autoneg_advertised; u16 mii_lpa_data; u16 speed = SPEED_0; u16 duplex = FULL_DUPLEX; int i; if (atl1c_get_link_status(hw)) { atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data); if (mii_lpa_data & LPA_10FULL) autoneg_advertised = ADVERTISED_10baseT_Full; else if (mii_lpa_data & LPA_10HALF) autoneg_advertised = ADVERTISED_10baseT_Half; else if (mii_lpa_data & LPA_100HALF) autoneg_advertised = ADVERTISED_100baseT_Half; else if (mii_lpa_data & LPA_100FULL) autoneg_advertised = ADVERTISED_100baseT_Full; save_autoneg_advertised = hw->autoneg_advertised; hw->phy_configured = false; hw->autoneg_advertised = autoneg_advertised; if (atl1c_restart_autoneg(hw) != 0) { dev_dbg(&pdev->dev, "phy autoneg failed\n"); ret = -1; } hw->autoneg_advertised = save_autoneg_advertised; if (mii_lpa_data) { for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { mdelay(100); if (atl1c_get_link_status(hw)) { if (atl1c_get_speed_and_duplex(hw, &speed, &duplex) != 0) dev_dbg(&pdev->dev, "get speed and duplex failed\n"); break; } } } } else { speed = SPEED_10; duplex = HALF_DUPLEX; } adapter->link_speed = speed; adapter->link_duplex = duplex; return ret; } int atl1c_restart_autoneg(struct atl1c_hw *hw) { int err = 0; u16 mii_bmcr_data = BMCR_RESET; err = atl1c_phy_setup_adv(hw); if (err) return err; mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); } int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc) { struct atl1c_adapter *adapter = hw->adapter; struct pci_dev *pdev = adapter->pdev; u32 master_ctrl, mac_ctrl, phy_ctrl; u32 wol_ctrl, speed; u16 phy_data; wol_ctrl = 0; speed = adapter->link_speed == SPEED_1000 ? 
MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100; AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl); AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl); AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl); master_ctrl &= ~MASTER_CTRL_CLK_SEL_DIS; mac_ctrl = FIELD_SETX(mac_ctrl, MAC_CTRL_SPEED, speed); mac_ctrl &= ~(MAC_CTRL_DUPLX | MAC_CTRL_RX_EN | MAC_CTRL_TX_EN); if (adapter->link_duplex == FULL_DUPLEX) mac_ctrl |= MAC_CTRL_DUPLX; phy_ctrl &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_CLS); phy_ctrl |= GPHY_CTRL_SEL_ANA_RST | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_HIB_EN; if (!wufc) { /* without WoL */ master_ctrl |= MASTER_CTRL_CLK_SEL_DIS; phy_ctrl |= GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PWDOWN_HW; AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); AT_WRITE_REG(hw, REG_WOL_CTRL, 0); hw->phy_configured = false; /* re-init PHY when resume */ return 0; } phy_ctrl |= GPHY_CTRL_EXT_RESET; if (wufc & AT_WUFC_MAG) { mac_ctrl |= MAC_CTRL_RX_EN | MAC_CTRL_BC_EN; wol_ctrl |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V11) wol_ctrl |= WOL_PATTERN_EN | WOL_PATTERN_PME_EN; } if (wufc & AT_WUFC_LNKC) { wol_ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { dev_dbg(&pdev->dev, "%s: write phy MII_IER failed.\n", atl1c_driver_name); } } /* clear PHY interrupt */ atl1c_read_phy_reg(hw, MII_ISR, &phy_data); dev_dbg(&pdev->dev, "%s: suspend MAC=%x,MASTER=%x,PHY=0x%x,WOL=%x\n", atl1c_driver_name, mac_ctrl, master_ctrl, phy_ctrl, wol_ctrl); AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl); return 0; } /* configure phy after Link change Event */ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed) { u16 phy_val; bool adj_thresh = false; if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2) adj_thresh = true; if (link_speed != SPEED_0) { /* link up */ /* az with brcm, half-amp */ if (hw->nic_type == athr_l1d_2) { atl1c_read_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL6, &phy_val); phy_val = FIELD_GETX(phy_val, CLDCTRL6_CAB_LEN); phy_val = phy_val > CLDCTRL6_CAB_LEN_SHORT ? AZ_ANADECT_LONG : AZ_ANADECT_DEF; atl1c_write_phy_dbg(hw, MIIDBG_AZ_ANADECT, phy_val); } /* threshold adjust */ if (adj_thresh && link_speed == SPEED_100 && hw->msi_lnkpatch) { atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, L1D_MSE16DB_UP); atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, L1D_SYSMODCTRL_IECHOADJ_DEF); } } else { /* link down */ if (adj_thresh && hw->msi_lnkpatch) { atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, SYSMODCTRL_IECHOADJ_DEF); atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, L1D_MSE16DB_DOWN); } } }
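/*
 * Illustrative aside (not part of the driver): atl1c_hash_mc_addr() above
 * combines ether_crc_le() with a bit reversal, and atl1c_hash_set() then
 * splits the result into a register index and a bit index within the two
 * REG_RX_HASH_TABLE words.  The standalone userspace sketch below mirrors
 * that arithmetic so the mapping from a multicast address to a hash-table
 * bit can be checked on its own; the CRC helper and the sample address are
 * assumptions chosen purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

/* reflected CRC-32 (poly 0xEDB88320, init ~0, no final XOR), which should
 * match what the kernel's ether_crc_le() produces */
static uint32_t crc32_le_sketch(const uint8_t *p, int len)
{
	uint32_t crc = 0xFFFFFFFFu;
	int i, b;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

int main(void)
{
	/* hypothetical multicast address, for illustration only */
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t crc = crc32_le_sketch(mc, 6);
	uint32_t hash = 0;
	int i;

	/* reverse the CRC bit order, as atl1c_hash_mc_addr() does */
	for (i = 0; i < 32; i++)
		hash |= ((crc >> i) & 1u) << (31 - i);

	/* register from the top bit, bit position from the next five bits,
	 * as atl1c_hash_set() does */
	printf("hash 0x%08x -> REG_RX_HASH_TABLE[%u], bit %u\n",
	       hash, (hash >> 31) & 0x1u, (hash >> 26) & 0x1fu);
	return 0;
}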
linux-master
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
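/*
 * Illustrative aside (not part of the driver): the "00-0B-6A-F6-00-DC"
 * comment in atl1c_hw_set_mac_addr() in atl1c_hw.c above describes how the
 * six MAC bytes are split across the two REG_MAC_STA_ADDR words.  This
 * standalone sketch packs and unpacks an address with plain shifts so the
 * register layout is visible without the htonl()/htons() type punning used
 * by atl1c_read_current_addr(); the address is simply the one from the
 * driver comment.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0B, 0x6A, 0xF6, 0x00, 0xDC };
	uint8_t out[6];
	uint32_t lo, hi;
	int i;

	/* pack: bytes 2..5 into the low word, bytes 0..1 into the high word */
	lo = (uint32_t)mac[2] << 24 | (uint32_t)mac[3] << 16 |
	     (uint32_t)mac[4] << 8  | (uint32_t)mac[5];
	hi = (uint32_t)mac[0] << 8  | (uint32_t)mac[1];
	printf("STA_ADDR word0=0x%08X word1=0x%04X\n", lo, hi);

	/* unpack the same layout back into wire byte order */
	out[0] = (uint8_t)(hi >> 8);
	out[1] = (uint8_t)hi;
	out[2] = (uint8_t)(lo >> 24);
	out[3] = (uint8_t)(lo >> 16);
	out[4] = (uint8_t)(lo >> 8);
	out[5] = (uint8_t)lo;
	for (i = 0; i < 6; i++)
		printf("%02X%c", out[i], i == 5 ? '\n' : '-');
	return 0;
}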
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2009 - 2009 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. */ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/slab.h> #include "atl1c.h" static int atl1c_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; u32 supported, advertising; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP); if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) supported |= SUPPORTED_1000baseT_Full; advertising = ADVERTISED_TP; advertising |= hw->autoneg_advertised; cmd->base.port = PORT_TP; cmd->base.phy_address = 0; if (adapter->link_speed != SPEED_0) { cmd->base.speed = adapter->link_speed; if (adapter->link_duplex == FULL_DUPLEX) cmd->base.duplex = DUPLEX_FULL; else cmd->base.duplex = DUPLEX_HALF; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } cmd->base.autoneg = AUTONEG_ENABLE; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int atl1c_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; u16 autoneg_advertised; while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) msleep(1); if (cmd->base.autoneg == AUTONEG_ENABLE) { autoneg_advertised = ADVERTISED_Autoneg; } else { u32 speed = cmd->base.speed; if (speed == SPEED_1000) { if (cmd->base.duplex != DUPLEX_FULL) { if (netif_msg_link(adapter)) dev_warn(&adapter->pdev->dev, "1000M half is invalid\n"); clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } autoneg_advertised = ADVERTISED_1000baseT_Full; } else if (speed == SPEED_100) { if (cmd->base.duplex == DUPLEX_FULL) autoneg_advertised = ADVERTISED_100baseT_Full; else autoneg_advertised = ADVERTISED_100baseT_Half; } else { if (cmd->base.duplex == DUPLEX_FULL) autoneg_advertised = ADVERTISED_10baseT_Full; else autoneg_advertised = ADVERTISED_10baseT_Half; } } if (hw->autoneg_advertised != autoneg_advertised) { hw->autoneg_advertised = autoneg_advertised; if (atl1c_restart_autoneg(hw) != 0) { if (netif_msg_link(adapter)) dev_warn(&adapter->pdev->dev, "ethtool speed/duplex setting failed\n"); clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } } clear_bit(__AT_RESETTING, &adapter->flags); return 0; } static u32 atl1c_get_msglevel(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); return adapter->msg_enable; } static void atl1c_set_msglevel(struct net_device *netdev, u32 data) { struct atl1c_adapter *adapter = netdev_priv(netdev); adapter->msg_enable = data; } static int atl1c_get_regs_len(struct net_device *netdev) { return AT_REGS_LEN; } static void atl1c_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; u32 *regs_buff = p; u16 phy_data; memset(p, 0, AT_REGS_LEN); regs->version = 1; AT_READ_REG(hw, REG_PM_CTRL, p++); AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); AT_READ_REG(hw, REG_TWSI_CTRL, p++); AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL, p++); AT_READ_REG(hw, REG_MASTER_CTRL, 
p++); AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, p++); AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++); AT_READ_REG(hw, REG_GPHY_CTRL, p++); AT_READ_REG(hw, REG_LINK_CTRL, p++); AT_READ_REG(hw, REG_IDLE_STATUS, p++); AT_READ_REG(hw, REG_MDIO_CTRL, p++); AT_READ_REG(hw, REG_SERDES, p++); AT_READ_REG(hw, REG_MAC_CTRL, p++); AT_READ_REG(hw, REG_MAC_IPG_IFG, p++); AT_READ_REG(hw, REG_MAC_STA_ADDR, p++); AT_READ_REG(hw, REG_MAC_STA_ADDR+4, p++); AT_READ_REG(hw, REG_RX_HASH_TABLE, p++); AT_READ_REG(hw, REG_RX_HASH_TABLE+4, p++); AT_READ_REG(hw, REG_RXQ_CTRL, p++); AT_READ_REG(hw, REG_TXQ_CTRL, p++); AT_READ_REG(hw, REG_MTU, p++); AT_READ_REG(hw, REG_WOL_CTRL, p++); atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data; atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data; } static int atl1c_get_eeprom_len(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); if (atl1c_check_eeprom_exist(&adapter->hw)) return AT_EEPROM_LEN; else return 0; } static int atl1c_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; u32 *eeprom_buff; int first_dword, last_dword; int ret_val = 0; int i; if (eeprom->len == 0) return -EINVAL; if (!atl1c_check_eeprom_exist(hw)) /* not exist */ return -EINVAL; eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc_array(last_dword - first_dword + 1, sizeof(u32), GFP_KERNEL); if (eeprom_buff == NULL) return -ENOMEM; for (i = first_dword; i < last_dword; i++) { if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { kfree(eeprom_buff); return -EIO; } } memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), eeprom->len); kfree(eeprom_buff); return ret_val; return 0; } static void atl1c_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct atl1c_adapter *adapter = netdev_priv(netdev); strscpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); } static void atl1c_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1c_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_MAGIC | WAKE_PHY; wol->wolopts = 0; if (adapter->wol & AT_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & AT_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & AT_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & AT_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; if (adapter->wol & AT_WUFC_LNKC) wol->wolopts |= WAKE_PHY; } static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1c_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) return -EOPNOTSUPP; /* these settings will always override what we currently have */ adapter->wol = 0; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= AT_WUFC_MAG; if (wol->wolopts & WAKE_PHY) adapter->wol |= AT_WUFC_LNKC; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static int atl1c_nway_reset(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) atl1c_reinit_locked(adapter); return 0; } static const struct ethtool_ops atl1c_ethtool_ops = { .get_drvinfo = 
atl1c_get_drvinfo,
	.get_regs_len		= atl1c_get_regs_len,
	.get_regs		= atl1c_get_regs,
	.get_wol		= atl1c_get_wol,
	.set_wol		= atl1c_set_wol,
	.get_msglevel		= atl1c_get_msglevel,
	.set_msglevel		= atl1c_set_msglevel,
	.nway_reset		= atl1c_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= atl1c_get_eeprom_len,
	.get_eeprom		= atl1c_get_eeprom,
	.get_link_ksettings	= atl1c_get_link_ksettings,
	.set_link_ksettings	= atl1c_set_link_ksettings,
};

void atl1c_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &atl1c_ethtool_ops;
}
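/*
 * Illustrative aside (not part of the driver): atl1c_get_eeprom() above maps
 * a byte-granular ethtool request onto the 32-bit EEPROM words the hardware
 * actually reads.  The standalone sketch below reproduces that window
 * arithmetic for a hypothetical request so the first/last word indices and
 * the copy-out offset are easy to verify; the offset and length are made up
 * for illustration.
 */
#include <stdio.h>

int main(void)
{
	/* hypothetical ethtool request: 10 bytes starting at offset 6 */
	unsigned int offset = 6, len = 10;
	unsigned int first_dword = offset >> 2;
	unsigned int last_dword = (offset + len - 1) >> 2;

	printf("read words %u..%u (%u words), copy %u bytes out starting at byte %u of the buffer\n",
	       first_dword, last_dword, last_dword - first_dword + 1,
	       len, offset & 3);
	return 0;
}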
linux-master
drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. */ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/slab.h> #include "atl1e.h" static int atl1e_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 supported, advertising; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP); if (hw->nic_type == athr_l1e) supported |= SUPPORTED_1000baseT_Full; advertising = ADVERTISED_TP; advertising |= ADVERTISED_Autoneg; advertising |= hw->autoneg_advertised; cmd->base.port = PORT_TP; cmd->base.phy_address = 0; if (adapter->link_speed != SPEED_0) { cmd->base.speed = adapter->link_speed; if (adapter->link_duplex == FULL_DUPLEX) cmd->base.duplex = DUPLEX_FULL; else cmd->base.duplex = DUPLEX_HALF; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } cmd->base.autoneg = AUTONEG_ENABLE; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int atl1e_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 advertising; ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) msleep(1); if (cmd->base.autoneg == AUTONEG_ENABLE) { u16 adv4, adv9; if (advertising & ADVERTISE_1000_FULL) { if (hw->nic_type == athr_l1e) { hw->autoneg_advertised = advertising & AT_ADV_MASK; } else { clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } } else if (advertising & ADVERTISE_1000_HALF) { clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } else { hw->autoneg_advertised = advertising & AT_ADV_MASK; } advertising = hw->autoneg_advertised | ADVERTISED_TP | ADVERTISED_Autoneg; adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL; adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK; if (hw->autoneg_advertised & ADVERTISE_10_HALF) adv4 |= ADVERTISE_10HALF; if (hw->autoneg_advertised & ADVERTISE_10_FULL) adv4 |= ADVERTISE_10FULL; if (hw->autoneg_advertised & ADVERTISE_100_HALF) adv4 |= ADVERTISE_100HALF; if (hw->autoneg_advertised & ADVERTISE_100_FULL) adv4 |= ADVERTISE_100FULL; if (hw->autoneg_advertised & ADVERTISE_1000_FULL) adv9 |= ADVERTISE_1000FULL; if (adv4 != hw->mii_autoneg_adv_reg || adv9 != hw->mii_1000t_ctrl_reg) { hw->mii_autoneg_adv_reg = adv4; hw->mii_1000t_ctrl_reg = adv9; hw->re_autoneg = true; } } else { clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } /* reset the link */ if (netif_running(adapter->netdev)) { atl1e_down(adapter); atl1e_up(adapter); } else atl1e_reset_hw(&adapter->hw); clear_bit(__AT_RESETTING, &adapter->flags); return 0; } static u32 atl1e_get_msglevel(struct net_device *netdev) { #ifdef DBG return 1; #else return 0; #endif } static int atl1e_get_regs_len(struct net_device *netdev) { return AT_REGS_LEN * sizeof(u32); } static void atl1e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 
*regs_buff = p; u16 phy_data; memset(p, 0, AT_REGS_LEN * sizeof(u32)); regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; regs_buff[0] = AT_READ_REG(hw, REG_VPD_CAP); regs_buff[1] = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); regs_buff[2] = AT_READ_REG(hw, REG_SPI_FLASH_CONFIG); regs_buff[3] = AT_READ_REG(hw, REG_TWSI_CTRL); regs_buff[4] = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); regs_buff[5] = AT_READ_REG(hw, REG_MASTER_CTRL); regs_buff[6] = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT); regs_buff[7] = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); regs_buff[8] = AT_READ_REG(hw, REG_GPHY_CTRL); regs_buff[9] = AT_READ_REG(hw, REG_CMBDISDMA_TIMER); regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS); regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL); regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK); regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL); regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG); regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR); regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4); regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE); regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4); regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); regs_buff[20] = AT_READ_REG(hw, REG_MTU); regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL); regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR); regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN); regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR); regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN); regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR); regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN); regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR); regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR); atl1e_read_phy_reg(hw, MII_BMCR, &phy_data); regs_buff[73] = (u32)phy_data; atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); regs_buff[74] = (u32)phy_data; } static int atl1e_get_eeprom_len(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); if (!atl1e_check_eeprom_exist(&adapter->hw)) return AT_EEPROM_LEN; else return 0; } static int atl1e_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 *eeprom_buff; int first_dword, last_dword; int ret_val = 0; int i; if (eeprom->len == 0) return -EINVAL; if (atl1e_check_eeprom_exist(hw)) /* not exist */ return -EINVAL; eeprom->magic = hw->vendor_id | (hw->device_id << 16); first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc_array(last_dword - first_dword + 1, sizeof(u32), GFP_KERNEL); if (eeprom_buff == NULL) return -ENOMEM; for (i = first_dword; i < last_dword; i++) { if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { kfree(eeprom_buff); return -EIO; } } memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), eeprom->len); kfree(eeprom_buff); return ret_val; } static int atl1e_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 *eeprom_buff; u32 *ptr; int first_dword, last_dword; int ret_val = 0; int i; if (eeprom->len == 0) return -EOPNOTSUPP; if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) return -EINVAL; first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL); if (eeprom_buff == NULL) return -ENOMEM; ptr = eeprom_buff; if (eeprom->offset & 3) { /* need read/modify/write 
of first changed EEPROM word */ /* only the second byte of the word is being modified */ if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) { ret_val = -EIO; goto out; } ptr++; } if (((eeprom->offset + eeprom->len) & 3)) { /* need read/modify/write of last changed EEPROM word */ /* only the first byte of the word is being modified */ if (!atl1e_read_eeprom(hw, last_dword * 4, &(eeprom_buff[last_dword - first_dword]))) { ret_val = -EIO; goto out; } } /* Device's eeprom is always little-endian, word addressable */ memcpy(ptr, bytes, eeprom->len); for (i = 0; i < last_dword - first_dword + 1; i++) { if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4), eeprom_buff[i])) { ret_val = -EIO; goto out; } } out: kfree(eeprom_buff); return ret_val; } static void atl1e_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct atl1e_adapter *adapter = netdev_priv(netdev); strscpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version)); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); } static void atl1e_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1e_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_MAGIC | WAKE_PHY; wol->wolopts = 0; if (adapter->wol & AT_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & AT_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & AT_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & AT_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; if (adapter->wol & AT_WUFC_LNKC) wol->wolopts |= WAKE_PHY; } static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1e_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) return -EOPNOTSUPP; /* these settings will always override what we currently have */ adapter->wol = 0; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= AT_WUFC_MAG; if (wol->wolopts & WAKE_PHY) adapter->wol |= AT_WUFC_LNKC; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static int atl1e_nway_reset(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) atl1e_reinit_locked(adapter); return 0; } static const struct ethtool_ops atl1e_ethtool_ops = { .get_drvinfo = atl1e_get_drvinfo, .get_regs_len = atl1e_get_regs_len, .get_regs = atl1e_get_regs, .get_wol = atl1e_get_wol, .set_wol = atl1e_set_wol, .get_msglevel = atl1e_get_msglevel, .nway_reset = atl1e_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = atl1e_get_eeprom_len, .get_eeprom = atl1e_get_eeprom, .set_eeprom = atl1e_set_eeprom, .get_link_ksettings = atl1e_get_link_ksettings, .set_link_ksettings = atl1e_set_link_ksettings, }; void atl1e_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &atl1e_ethtool_ops; }
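/*
 * Illustrative aside (not part of the driver): atl1e_set_eeprom() above has
 * to round a byte-granular write out to whole 32-bit words, pre-reading the
 * partially covered first and last words so their untouched bytes survive.
 * The standalone sketch below models that read/modify/write splice on an
 * in-memory array; the array contents, offset and length are invented for
 * illustration, and the whole window is pre-read here for simplicity where
 * the driver only pre-reads the two partial words.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t eeprom[16] = {
	0x10, 0x11, 0x12, 0x13, 0x20, 0x21, 0x22, 0x23,
	0x30, 0x31, 0x32, 0x33, 0x40, 0x41, 0x42, 0x43,
};

int main(void)
{
	/* hypothetical update: 5 bytes at offset 3, unaligned on both ends */
	const uint8_t newbytes[5] = { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE };
	unsigned int offset = 3, len = 5;
	unsigned int first = offset >> 2, last = (offset + len - 1) >> 2;
	uint8_t buf[16];
	unsigned int i;

	memcpy(buf, &eeprom[first * 4], (last - first + 1) * 4);	/* read   */
	memcpy(buf + (offset & 3), newbytes, len);			/* modify */
	memcpy(&eeprom[first * 4], buf, (last - first + 1) * 4);	/* write  */

	for (i = 0; i < sizeof(eeprom); i++)
		printf("%02X%c", eeprom[i], (i & 3) == 3 ? '\n' : ' ');
	return 0;
}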
linux-master
drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/mii.h> #include <linux/crc32.h> #include "atl1e.h" /* * check_eeprom_exist * return 0 if eeprom exist */ int atl1e_check_eeprom_exist(struct atl1e_hw *hw) { u32 value; value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); if (value & SPI_FLASH_CTRL_EN_VPD) { value &= ~SPI_FLASH_CTRL_EN_VPD; AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); } value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST); return ((value & 0xFF00) == 0x6C00) ? 0 : 1; } void atl1e_hw_set_mac_addr(struct atl1e_hw *hw) { u32 value; /* * 00-0B-6A-F6-00-DC * 0: 6AF600DC 1: 000B * low dword */ value = (((u32)hw->mac_addr[2]) << 24) | (((u32)hw->mac_addr[3]) << 16) | (((u32)hw->mac_addr[4]) << 8) | (((u32)hw->mac_addr[5])) ; AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); /* hight dword */ value = (((u32)hw->mac_addr[0]) << 8) | (((u32)hw->mac_addr[1])) ; AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); } /* * atl1e_get_permanent_address * return 0 if get valid mac address, */ static int atl1e_get_permanent_address(struct atl1e_hw *hw) { u32 addr[2]; u32 i; u32 twsi_ctrl_data; u8 eth_addr[ETH_ALEN]; if (is_valid_ether_addr(hw->perm_mac_addr)) return 0; /* init */ addr[0] = addr[1] = 0; if (!atl1e_check_eeprom_exist(hw)) { /* eeprom exist */ twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { msleep(10); twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) break; } if (i >= AT_TWSI_EEPROM_TIMEOUT) return AT_ERR_TIMEOUT; } /* maybe MAC-address is from BIOS */ addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR); addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4); *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } return AT_ERR_EEPROM; } bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value) { return true; } bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value) { int i; u32 control; if (offset & 3) return false; /* address do not align */ AT_WRITE_REG(hw, REG_VPD_DATA, 0); control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; AT_WRITE_REG(hw, REG_VPD_CAP, control); for (i = 0; i < 10; i++) { msleep(2); control = AT_READ_REG(hw, REG_VPD_CAP); if (control & VPD_CAP_VPD_FLAG) break; } if (control & VPD_CAP_VPD_FLAG) { *p_value = AT_READ_REG(hw, REG_VPD_DATA); return true; } return false; /* timeout */ } void atl1e_force_ps(struct atl1e_hw *hw) { AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET); } /* * Reads the adapter's MAC address from the EEPROM * * hw - Struct containing variables accessed by shared code */ int atl1e_read_mac_addr(struct atl1e_hw *hw) { int err = 0; err = atl1e_get_permanent_address(hw); if (err) return AT_ERR_EEPROM; memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); return 0; } /* * atl1e_hash_mc_addr * purpose * set hash value for a multicast address */ u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr) { u32 crc32; u32 value = 0; int i; crc32 = ether_crc_le(6, mc_addr); for (i = 0; i < 32; i++) value |= (((crc32 >> i) & 1) << (31 - i)); return value; } /* 
* Sets the bit in the multicast table corresponding to the hash value. * hw - Struct containing variables accessed by shared code * hash_value - Multicast address hash value */ void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value) { u32 hash_bit, hash_reg; u32 mta; /* * The HASH Table is a register array of 2 32-bit registers. * It is treated like an array of 64 bits. We want to set * bit BitArray[hash_value]. So we figure out what register * the bit is in, read it, OR in the new bit, then write * back the new value. The register is determined by the * upper 7 bits of the hash value and the bit within that * register are determined by the lower 5 bits of the value. */ hash_reg = (hash_value >> 31) & 0x1; hash_bit = (hash_value >> 26) & 0x1F; mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); mta |= (1 << hash_bit); AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); } /* * Reads the value from a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to read */ int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data) { u32 val; int i; val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; AT_WRITE_REG(hw, REG_MDIO_CTRL, val); wmb(); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = AT_READ_REG(hw, REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; wmb(); } if (!(val & (MDIO_START | MDIO_BUSY))) { *phy_data = (u16)val; return 0; } return AT_ERR_PHY; } /* * Writes a value to a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to write * data - data to write to the PHY */ int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data) { int i; u32 val; val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | MDIO_SUP_PREAMBLE | MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; AT_WRITE_REG(hw, REG_MDIO_CTRL, val); wmb(); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = AT_READ_REG(hw, REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; wmb(); } if (!(val & (MDIO_START | MDIO_BUSY))) return 0; return AT_ERR_PHY; } /* * atl1e_init_pcie - init PCIE module */ static void atl1e_init_pcie(struct atl1e_hw *hw) { u32 value; /* comment 2lines below to save more power when sususpend value = LTSSM_TEST_MODE_DEF; AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); */ /* pcie flow control mode change */ value = AT_READ_REG(hw, 0x1008); value |= 0x8000; AT_WRITE_REG(hw, 0x1008, value); } /* * Configures PHY autoneg and flow control advertisement settings * * hw - Struct containing variables accessed by shared code */ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) { s32 ret_val; u16 mii_autoneg_adv_reg; u16 mii_1000t_ctrl_reg; if (0 != hw->mii_autoneg_adv_reg) return 0; /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK; /* * Need to parse autoneg_advertised and set up * the appropriate PHY registers. First we will parse for * autoneg_advertised software override. Since we can advertise * a plethora of combinations, we need to check each bit * individually. */ /* * First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T control Register (Address 9). 
*/ mii_autoneg_adv_reg &= ~ADVERTISE_ALL; mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; /* * Need to parse MediaType and setup the * appropriate PHY registers. */ switch (hw->media_type) { case MEDIA_TYPE_AUTO_SENSOR: mii_autoneg_adv_reg |= ADVERTISE_ALL; hw->autoneg_advertised = ADVERTISE_ALL; if (hw->nic_type == athr_l1e) { mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; hw->autoneg_advertised |= ADVERTISE_1000_FULL; } break; case MEDIA_TYPE_100M_FULL: mii_autoneg_adv_reg |= ADVERTISE_100FULL; hw->autoneg_advertised = ADVERTISE_100_FULL; break; case MEDIA_TYPE_100M_HALF: mii_autoneg_adv_reg |= ADVERTISE_100_HALF; hw->autoneg_advertised = ADVERTISE_100_HALF; break; case MEDIA_TYPE_10M_FULL: mii_autoneg_adv_reg |= ADVERTISE_10_FULL; hw->autoneg_advertised = ADVERTISE_10_FULL; break; default: mii_autoneg_adv_reg |= ADVERTISE_10_HALF; hw->autoneg_advertised = ADVERTISE_10_HALF; break; } /* flow control fixed to enable all */ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); if (ret_val) return ret_val; if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000, mii_1000t_ctrl_reg); if (ret_val) return ret_val; } return 0; } /* * Resets the PHY and make all config validate * * hw - Struct containing variables accessed by shared code * * Sets bit 15 and 12 of the MII control regiser (for F001 bug) */ int atl1e_phy_commit(struct atl1e_hw *hw) { struct atl1e_adapter *adapter = hw->adapter; int ret_val; u16 phy_data; phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data); if (ret_val) { u32 val; int i; /************************************** * pcie serdes link may be down ! **************************************/ for (i = 0; i < 25; i++) { msleep(1); val = AT_READ_REG(hw, REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if (0 != (val & (MDIO_START | MDIO_BUSY))) { netdev_err(adapter->netdev, "pcie linkdown at least for 25ms\n"); return ret_val; } netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i); } return 0; } int atl1e_phy_init(struct atl1e_hw *hw) { struct atl1e_adapter *adapter = hw->adapter; s32 ret_val; u16 phy_val; if (hw->phy_configured) { if (hw->re_autoneg) { hw->re_autoneg = false; return atl1e_restart_autoneg(hw); } return 0; } /* RESET GPHY Core */ AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT); msleep(2); AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET); msleep(2); /* patches */ /* p1. eable hibernation mode */ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB); if (ret_val) return ret_val; ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00); if (ret_val) return ret_val; /* p2. set Class A/B for all modes */ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0); if (ret_val) return ret_val; phy_val = 0x02ef; /* remove Class AB */ /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */ ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val); if (ret_val) return ret_val; /* p3. 10B ??? */ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12); if (ret_val) return ret_val; ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04); if (ret_val) return ret_val; /* p4. 
1000T power */ ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4); if (ret_val) return ret_val; ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB); if (ret_val) return ret_val; ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5); if (ret_val) return ret_val; ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46); if (ret_val) return ret_val; msleep(1); /*Enable PHY LinkChange Interrupt */ ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00); if (ret_val) { netdev_err(adapter->netdev, "Error enable PHY linkChange Interrupt\n"); return ret_val; } /* setup AutoNeg parameters */ ret_val = atl1e_phy_setup_autoneg_adv(hw); if (ret_val) { netdev_err(adapter->netdev, "Error Setting up Auto-Negotiation\n"); return ret_val; } /* SW.Reset & En-Auto-Neg to restart Auto-Neg*/ netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n"); ret_val = atl1e_phy_commit(hw); if (ret_val) { netdev_err(adapter->netdev, "Error resetting the phy\n"); return ret_val; } hw->phy_configured = true; return 0; } /* * Reset the transmit and receive units; mask and clear all interrupts. * hw - Struct containing variables accessed by shared code * return : 0 or idle status (if error) */ int atl1e_reset_hw(struct atl1e_hw *hw) { struct atl1e_adapter *adapter = hw->adapter; struct pci_dev *pdev = adapter->pdev; u32 idle_status_data = 0; u16 pci_cfg_cmd_word = 0; int timeout = 0; /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ pci_read_config_word(pdev, PCI_REG_COMMAND, &pci_cfg_cmd_word); if ((pci_cfg_cmd_word & (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) != (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) { pci_cfg_cmd_word |= (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER); pci_write_config_word(pdev, PCI_REG_COMMAND, pci_cfg_cmd_word); } /* * Issue Soft Reset to the MAC. This will reset the chip's * transmit, receive, DMA. It will not effect * the current PCI configuration. The global reset bit is self- * clearing, and should clear within a microsecond. */ AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST); wmb(); msleep(1); /* Wait at least 10ms for All module to be Idle */ for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS); if (idle_status_data == 0) break; msleep(1); cpu_relax(); } if (timeout >= AT_HW_MAX_IDLE_DELAY) { netdev_err(adapter->netdev, "MAC state machine can't be idle since disabled for 10ms second\n"); return AT_ERR_TIMEOUT; } return 0; } /* * Performs basic configuration of the adapter. * * hw - Struct containing variables accessed by shared code * Assumes that the controller has previously been reset and is in a * post-reset uninitialized state. Initializes multicast table, * and Calls routines to setup link * Leaves the transmit and receive units disabled and uninitialized. */ int atl1e_init_hw(struct atl1e_hw *hw) { s32 ret_val = 0; atl1e_init_pcie(hw); /* Zero out the Multicast HASH table */ /* clear the old settings from the multicast hash table */ AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); ret_val = atl1e_phy_init(hw); return ret_val; } /* * Detects the current speed and duplex settings of the hardware. 
* * hw - Struct containing variables accessed by shared code * speed - Speed of the connection * duplex - Duplex setting of the connection */ int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex) { int err; u16 phy_data; /* Read PHY Specific Status Register (17) */ err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data); if (err) return err; if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED)) return AT_ERR_PHY_RES; switch (phy_data & MII_AT001_PSSR_SPEED) { case MII_AT001_PSSR_1000MBS: *speed = SPEED_1000; break; case MII_AT001_PSSR_100MBS: *speed = SPEED_100; break; case MII_AT001_PSSR_10MBS: *speed = SPEED_10; break; default: return AT_ERR_PHY_SPEED; } if (phy_data & MII_AT001_PSSR_DPLX) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; return 0; } int atl1e_restart_autoneg(struct atl1e_hw *hw) { int err = 0; err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); if (err) return err; if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { err = atl1e_write_phy_reg(hw, MII_CTRL1000, hw->mii_1000t_ctrl_reg); if (err) return err; } err = atl1e_write_phy_reg(hw, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); return err; }
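/*
 * Illustrative aside (not part of the driver): atl1e_phy_setup_autoneg_adv()
 * and atl1e_phy_commit() above boil down to composing three standard MII
 * register values and writing them to the PHY.  The standalone sketch below
 * builds those words for the MEDIA_TYPE_AUTO_SENSOR case on a gigabit-capable
 * part; the bit values are the usual <linux/mii.h> ones, reproduced here only
 * so the sketch is self-contained, and the driver's default-cap masks may OR
 * in additional selector bits beyond what is shown.
 */
#include <stdint.h>
#include <stdio.h>

#define ADVERTISE_10HALF	0x0020
#define ADVERTISE_10FULL	0x0040
#define ADVERTISE_100HALF	0x0080
#define ADVERTISE_100FULL	0x0100
#define ADVERTISE_PAUSE_CAP	0x0400
#define ADVERTISE_PAUSE_ASYM	0x0800
#define ADVERTISE_1000FULL	0x0200	/* lives in MII_CTRL1000 */
#define BMCR_RESET		0x8000
#define BMCR_ANENABLE		0x1000
#define BMCR_ANRESTART		0x0200

int main(void)
{
	/* advertise all 10/100 modes plus both pause bits ... */
	uint16_t adv = ADVERTISE_10HALF | ADVERTISE_10FULL |
		       ADVERTISE_100HALF | ADVERTISE_100FULL |
		       ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	/* ... 1000BASE-T full duplex in the 1000T control register ... */
	uint16_t ctrl1000 = ADVERTISE_1000FULL;
	/* ... then reset the PHY with autoneg enabled and restarted */
	uint16_t bmcr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;

	printf("MII_ADVERTISE=0x%04x MII_CTRL1000=0x%04x MII_BMCR=0x%04x\n",
	       (unsigned)adv, (unsigned)ctrl1000, (unsigned)bmcr);
	return 0;
}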
linux-master
drivers/net/ethernet/atheros/atl1e/atl1e_hw.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. */ #include "atl1e.h" char atl1e_driver_name[] = "ATL1E"; #define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026 /* * atl1e_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static const struct pci_device_id atl1e_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, /* required last entry */ { 0 } }; MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl); MODULE_AUTHOR("Atheros Corporation, <[email protected]>, Jie Yang <[email protected]>"); MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); MODULE_LICENSE("GPL"); static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter); static const u16 atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = { {REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD}, {REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD}, {REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD}, {REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD} }; static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] = { REG_RXF0_BASE_ADDR_HI, REG_RXF1_BASE_ADDR_HI, REG_RXF2_BASE_ADDR_HI, REG_RXF3_BASE_ADDR_HI }; static const u16 atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = { {REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO}, {REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO}, {REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO}, {REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO} }; static const u16 atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = { {REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO}, {REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO}, {REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO}, {REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO} }; static const u16 atl1e_pay_load_size[] = { 128, 256, 512, 1024, 2048, 4096, }; /** * atl1e_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ static inline void atl1e_irq_enable(struct atl1e_adapter *adapter) { if (likely(atomic_dec_and_test(&adapter->irq_sem))) { AT_WRITE_REG(&adapter->hw, REG_ISR, 0); AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); AT_WRITE_FLUSH(&adapter->hw); } } /** * atl1e_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ static inline void atl1e_irq_disable(struct atl1e_adapter *adapter) { atomic_inc(&adapter->irq_sem); AT_WRITE_REG(&adapter->hw, REG_IMR, 0); AT_WRITE_FLUSH(&adapter->hw); synchronize_irq(adapter->pdev->irq); } /** * atl1e_irq_reset - reset interrupt confiure on the NIC * @adapter: board private structure */ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter) { atomic_set(&adapter->irq_sem, 0); AT_WRITE_REG(&adapter->hw, REG_ISR, 0); AT_WRITE_REG(&adapter->hw, REG_IMR, 0); AT_WRITE_FLUSH(&adapter->hw); } /** * atl1e_phy_config - Timer Call-back * @t: timer list containing pointer to netdev cast into an unsigned long */ static void atl1e_phy_config(struct timer_list *t) { struct atl1e_adapter *adapter = from_timer(adapter, t, phy_config_timer); struct atl1e_hw *hw = &adapter->hw; unsigned long flags; spin_lock_irqsave(&adapter->mdio_lock, flags); atl1e_restart_autoneg(hw); spin_unlock_irqrestore(&adapter->mdio_lock, 
flags); } void atl1e_reinit_locked(struct atl1e_adapter *adapter) { while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) msleep(1); atl1e_down(adapter); atl1e_up(adapter); clear_bit(__AT_RESETTING, &adapter->flags); } static void atl1e_reset_task(struct work_struct *work) { struct atl1e_adapter *adapter; adapter = container_of(work, struct atl1e_adapter, reset_task); atl1e_reinit_locked(adapter); } static int atl1e_check_link(struct atl1e_adapter *adapter) { struct atl1e_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; int err = 0; u16 speed, duplex, phy_data; /* MII_BMSR must read twice */ atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); if ((phy_data & BMSR_LSTATUS) == 0) { /* link down */ if (netif_carrier_ok(netdev)) { /* old link state: Up */ u32 value; /* disable rx */ value = AT_READ_REG(hw, REG_MAC_CTRL); value &= ~MAC_CTRL_RX_EN; AT_WRITE_REG(hw, REG_MAC_CTRL, value); adapter->link_speed = SPEED_0; netif_carrier_off(netdev); netif_stop_queue(netdev); } } else { /* Link Up */ err = atl1e_get_speed_and_duplex(hw, &speed, &duplex); if (unlikely(err)) return err; /* link result is our setting */ if (adapter->link_speed != speed || adapter->link_duplex != duplex) { adapter->link_speed = speed; adapter->link_duplex = duplex; atl1e_setup_mac_ctrl(adapter); netdev_info(netdev, "NIC Link is Up <%d Mbps %s Duplex>\n", adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half"); } if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ netif_carrier_on(netdev); netif_wake_queue(netdev); } } return 0; } /** * atl1e_link_chg_task - deal with link change event Out of interrupt context * @work: work struct with driver info */ static void atl1e_link_chg_task(struct work_struct *work) { struct atl1e_adapter *adapter; unsigned long flags; adapter = container_of(work, struct atl1e_adapter, link_chg_task); spin_lock_irqsave(&adapter->mdio_lock, flags); atl1e_check_link(adapter); spin_unlock_irqrestore(&adapter->mdio_lock, flags); } static void atl1e_link_chg_event(struct atl1e_adapter *adapter) { struct net_device *netdev = adapter->netdev; u16 phy_data = 0; u16 link_up = 0; spin_lock(&adapter->mdio_lock); atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); spin_unlock(&adapter->mdio_lock); link_up = phy_data & BMSR_LSTATUS; /* notify upper layer link down ASAP */ if (!link_up) { if (netif_carrier_ok(netdev)) { /* old link state: Up */ netdev_info(netdev, "NIC Link is Down\n"); adapter->link_speed = SPEED_0; netif_stop_queue(netdev); } } schedule_work(&adapter->link_chg_task); } static void atl1e_del_timer(struct atl1e_adapter *adapter) { del_timer_sync(&adapter->phy_config_timer); } static void atl1e_cancel_work(struct atl1e_adapter *adapter) { cancel_work_sync(&adapter->reset_task); cancel_work_sync(&adapter->link_chg_task); } /** * atl1e_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure * @txqueue: the index of the hanging queue */ static void atl1e_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct atl1e_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ schedule_work(&adapter->reset_task); } /** * atl1e_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * * The set_multi entry point is called whenever the multicast address * list or the network interface flags are updated. 
This routine is * responsible for configuring the hardware for proper multicast, * promiscuous mode, and all-multi behavior. */ static void atl1e_set_multi(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; u32 mac_ctrl_data = 0; u32 hash_value; /* Check for Promiscuous and All Multicast modes */ mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL); if (netdev->flags & IFF_PROMISC) { mac_ctrl_data |= MAC_CTRL_PROMIS_EN; } else if (netdev->flags & IFF_ALLMULTI) { mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN; } else { mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); } AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); /* clear the old settings from the multicast hash table */ AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ netdev_for_each_mc_addr(ha, netdev) { hash_value = atl1e_hash_mc_addr(hw, ha->addr); atl1e_hash_set(hw, hash_value); } } static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data) { if (features & NETIF_F_RXALL) { /* enable RX of ALL frames */ *mac_ctrl_data |= MAC_CTRL_DBG; } else { /* disable RX of ALL frames */ *mac_ctrl_data &= ~MAC_CTRL_DBG; } } static void atl1e_rx_mode(struct net_device *netdev, netdev_features_t features) { struct atl1e_adapter *adapter = netdev_priv(netdev); u32 mac_ctrl_data = 0; netdev_dbg(adapter->netdev, "%s\n", __func__); atl1e_irq_disable(adapter); mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); __atl1e_rx_mode(features, &mac_ctrl_data); AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); atl1e_irq_enable(adapter); } static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) { if (features & NETIF_F_HW_VLAN_CTAG_RX) { /* enable VLAN tag insert/strip */ *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; } else { /* disable VLAN tag insert/strip */ *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; } } static void atl1e_vlan_mode(struct net_device *netdev, netdev_features_t features) { struct atl1e_adapter *adapter = netdev_priv(netdev); u32 mac_ctrl_data = 0; netdev_dbg(adapter->netdev, "%s\n", __func__); atl1e_irq_disable(adapter); mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); __atl1e_vlan_mode(features, &mac_ctrl_data); AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); atl1e_irq_enable(adapter); } static void atl1e_restore_vlan(struct atl1e_adapter *adapter) { netdev_dbg(adapter->netdev, "%s\n", __func__); atl1e_vlan_mode(adapter->netdev, adapter->netdev->features); } /** * atl1e_set_mac_addr - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure */ static int atl1e_set_mac_addr(struct net_device *netdev, void *p) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (netif_running(netdev)) return -EBUSY; eth_hw_addr_set(netdev, addr->sa_data); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl1e_hw_set_mac_addr(&adapter->hw); return 0; } static netdev_features_t atl1e_fix_features(struct net_device *netdev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. 
*/ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } static int atl1e_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; if (changed & NETIF_F_HW_VLAN_CTAG_RX) atl1e_vlan_mode(netdev, features); if (changed & NETIF_F_RXALL) atl1e_rx_mode(netdev, features); return 0; } /** * atl1e_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure */ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu) { struct atl1e_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; /* set MTU */ if (netif_running(netdev)) { while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) msleep(1); netdev->mtu = new_mtu; adapter->hw.max_frame_size = new_mtu; adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3; atl1e_down(adapter); atl1e_up(adapter); clear_bit(__AT_RESETTING, &adapter->flags); } return 0; } /* * caller should hold mdio_lock */ static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num) { struct atl1e_adapter *adapter = netdev_priv(netdev); u16 result; atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); return result; } static void atl1e_mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) { struct atl1e_adapter *adapter = netdev_priv(netdev); if (atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val)) netdev_err(netdev, "write phy register failed\n"); } static int atl1e_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(ifr); unsigned long flags; int retval = 0; if (!netif_running(netdev)) return -EINVAL; spin_lock_irqsave(&adapter->mdio_lock, flags); switch (cmd) { case SIOCGMIIPHY: data->phy_id = 0; break; case SIOCGMIIREG: if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, &data->val_out)) { retval = -EIO; goto out; } break; case SIOCSMIIREG: if (data->reg_num & ~(0x1F)) { retval = -EFAULT; goto out; } netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n", data->reg_num, data->val_in); if (atl1e_write_phy_reg(&adapter->hw, data->reg_num, data->val_in)) { retval = -EIO; goto out; } break; default: retval = -EOPNOTSUPP; break; } out: spin_unlock_irqrestore(&adapter->mdio_lock, flags); return retval; } static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return atl1e_mii_ioctl(netdev, ifr, cmd); default: return -EOPNOTSUPP; } } static void atl1e_setup_pcicmd(struct pci_dev *pdev) { u16 cmd; pci_read_config_word(pdev, PCI_COMMAND, &cmd); cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO); cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); pci_write_config_word(pdev, PCI_COMMAND, cmd); /* * some motherboards BIOS(PXE/EFI) driver may set PME * while they transfer control to OS (Windows/Linux) * so we should clear this bit before NIC work normally */ pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0); msleep(1); } /** * atl1e_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * */ static int atl1e_alloc_queues(struct atl1e_adapter *adapter) { return 0; } /** * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter) * 
@adapter: board private structure to initialize * * atl1e_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). */ static int atl1e_sw_init(struct atl1e_adapter *adapter) { struct atl1e_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; u32 phy_status_data = 0; adapter->wol = 0; adapter->link_speed = SPEED_0; /* hardware init */ adapter->link_duplex = FULL_DUPLEX; adapter->num_rx_queues = 1; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_id = pdev->subsystem_device; hw->revision_id = pdev->revision; pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); /* nic type */ if (hw->revision_id >= 0xF0) { hw->nic_type = athr_l2e_revB; } else { if (phy_status_data & PHY_STATUS_100M) hw->nic_type = athr_l1e; else hw->nic_type = athr_l2e_revA; } phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); if (phy_status_data & PHY_STATUS_EMI_CA) hw->emi_ca = true; else hw->emi_ca = false; hw->phy_configured = false; hw->preamble_len = 7; hw->max_frame_size = adapter->netdev->mtu; hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + 7) >> 3; hw->rrs_type = atl1e_rrs_disable; hw->indirect_tab = 0; hw->base_cpu = 0; /* need confirm */ hw->ict = 50000; /* 100ms */ hw->smb_timer = 200000; /* 200ms */ hw->tpd_burst = 5; hw->rrd_thresh = 1; hw->tpd_thresh = adapter->tx_ring.count / 2; hw->rx_count_down = 4; /* 2us resolution */ hw->tx_count_down = hw->imt * 4 / 3; hw->dmar_block = atl1e_dma_req_1024; hw->dmaw_block = atl1e_dma_req_1024; hw->dmar_dly_cnt = 15; hw->dmaw_dly_cnt = 4; if (atl1e_alloc_queues(adapter)) { netdev_err(adapter->netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; } atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->mdio_lock); set_bit(__AT_DOWN, &adapter->flags); return 0; } /** * atl1e_clean_tx_ring - Free Tx-skb * @adapter: board private structure */ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; struct atl1e_tx_buffer *tx_buffer = NULL; struct pci_dev *pdev = adapter->pdev; u16 index, ring_count; if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL) return; ring_count = tx_ring->count; /* first unmmap dma */ for (index = 0; index < ring_count; index++) { tx_buffer = &tx_ring->tx_buffer[index]; if (tx_buffer->dma) { if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) dma_unmap_single(&pdev->dev, tx_buffer->dma, tx_buffer->length, DMA_TO_DEVICE); else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) dma_unmap_page(&pdev->dev, tx_buffer->dma, tx_buffer->length, DMA_TO_DEVICE); tx_buffer->dma = 0; } } /* second free skb */ for (index = 0; index < ring_count; index++) { tx_buffer = &tx_ring->tx_buffer[index]; if (tx_buffer->skb) { dev_kfree_skb_any(tx_buffer->skb); tx_buffer->skb = NULL; } } /* Zero out Tx-buffers */ memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) * ring_count); memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) * ring_count); } /** * atl1e_clean_rx_ring - Free rx-reservation skbs * @adapter: board private structure */ static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter) { struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc; u16 i, j; if (adapter->ring_vir_addr == NULL) return; /* Zero out the descriptor ring */ 
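/* Each RX queue owns AT_PAGE_NUM_PER_QUEUE pages; only the pages that were actually mapped (addr != NULL) need to be cleared. */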
for (i = 0; i < adapter->num_rx_queues; i++) { for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { if (rx_page_desc[i].rx_page[j].addr != NULL) { memset(rx_page_desc[i].rx_page[j].addr, 0, rx_ring->real_page_size); } } } } static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size) { *ring_size = ((u32)(adapter->tx_ring.count * sizeof(struct atl1e_tpd_desc) + 7 /* tx ring, qword align */ + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues + 31 /* rx ring, 32 bytes align */ + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) * sizeof(u32) + 3)); /* tx, rx cmd, dword align */ } static void atl1e_init_ring_resources(struct atl1e_adapter *adapter) { struct atl1e_rx_ring *rx_ring = NULL; rx_ring = &adapter->rx_ring; rx_ring->real_page_size = adapter->rx_ring.page_size + adapter->hw.max_frame_size + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32); atl1e_cal_ring_size(adapter, &adapter->ring_size); adapter->ring_vir_addr = NULL; adapter->rx_ring.desc = NULL; rwlock_init(&adapter->tx_ring.tx_lock); } /* * Read / Write Ptr Initialize: */ static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter) { struct atl1e_tx_ring *tx_ring = NULL; struct atl1e_rx_ring *rx_ring = NULL; struct atl1e_rx_page_desc *rx_page_desc = NULL; int i, j; tx_ring = &adapter->tx_ring; rx_ring = &adapter->rx_ring; rx_page_desc = rx_ring->rx_page_desc; tx_ring->next_to_use = 0; atomic_set(&tx_ring->next_to_clean, 0); for (i = 0; i < adapter->num_rx_queues; i++) { rx_page_desc[i].rx_using = 0; rx_page_desc[i].rx_nxseq = 0; for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { *rx_page_desc[i].rx_page[j].write_offset_addr = 0; rx_page_desc[i].rx_page[j].read_offset = 0; } } } /** * atl1e_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * * Free all transmit software resources */ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; atl1e_clean_tx_ring(adapter); atl1e_clean_rx_ring(adapter); if (adapter->ring_vir_addr) { dma_free_coherent(&pdev->dev, adapter->ring_size, adapter->ring_vir_addr, adapter->ring_dma); adapter->ring_vir_addr = NULL; } if (adapter->tx_ring.tx_buffer) { kfree(adapter->tx_ring.tx_buffer); adapter->tx_ring.tx_buffer = NULL; } } /** * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct atl1e_tx_ring *tx_ring; struct atl1e_rx_ring *rx_ring; struct atl1e_rx_page_desc *rx_page_desc; int size, i, j; u32 offset = 0; int err = 0; if (adapter->ring_vir_addr != NULL) return 0; /* alloced already */ tx_ring = &adapter->tx_ring; rx_ring = &adapter->rx_ring; /* real ring DMA buffer */ size = adapter->ring_size; adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev, adapter->ring_size, &adapter->ring_dma, GFP_KERNEL); if (adapter->ring_vir_addr == NULL) { netdev_err(adapter->netdev, "dma_alloc_coherent failed, size = D%d\n", size); return -ENOMEM; } rx_page_desc = rx_ring->rx_page_desc; /* Init TPD Ring */ tx_ring->dma = roundup(adapter->ring_dma, 8); offset = tx_ring->dma - adapter->ring_dma; tx_ring->desc = adapter->ring_vir_addr + offset; size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count); tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL); if (tx_ring->tx_buffer == NULL) { err = -ENOMEM; goto failed; } /* Init 
RXF-Pages */ offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count); offset = roundup(offset, 32); for (i = 0; i < adapter->num_rx_queues; i++) { for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { rx_page_desc[i].rx_page[j].dma = adapter->ring_dma + offset; rx_page_desc[i].rx_page[j].addr = adapter->ring_vir_addr + offset; offset += rx_ring->real_page_size; } } /* Init CMB dma address */ tx_ring->cmb_dma = adapter->ring_dma + offset; tx_ring->cmb = adapter->ring_vir_addr + offset; offset += sizeof(u32); for (i = 0; i < adapter->num_rx_queues; i++) { for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { rx_page_desc[i].rx_page[j].write_offset_dma = adapter->ring_dma + offset; rx_page_desc[i].rx_page[j].write_offset_addr = adapter->ring_vir_addr + offset; offset += sizeof(u32); } } if (unlikely(offset > adapter->ring_size)) { netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", offset, adapter->ring_size); err = -1; goto failed; } return 0; failed: if (adapter->ring_vir_addr != NULL) { dma_free_coherent(&pdev->dev, adapter->ring_size, adapter->ring_vir_addr, adapter->ring_dma); adapter->ring_vir_addr = NULL; } return err; } static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter) { struct atl1e_hw *hw = &adapter->hw; struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; struct atl1e_rx_page_desc *rx_page_desc = NULL; int i, j; AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32)); AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO, (u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK)); AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count)); AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO, (u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK)); rx_page_desc = rx_ring->rx_page_desc; /* RXF Page Physical address / Page Length */ for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) { AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i], (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32)); for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { u32 page_phy_addr; u32 offset_phy_addr; page_phy_addr = rx_page_desc[i].rx_page[j].dma; offset_phy_addr = rx_page_desc[i].rx_page[j].write_offset_dma; AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j], page_phy_addr & AT_DMA_LO_ADDR_MASK); AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j], offset_phy_addr & AT_DMA_LO_ADDR_MASK); AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1); } } /* Page Length */ AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size); /* Load all of base address above */ AT_WRITE_REG(hw, REG_LOAD_PTR, 1); } static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) { struct atl1e_hw *hw = &adapter->hw; u32 dev_ctrl_data = 0; u32 max_pay_load = 0; u32 jumbo_thresh = 0; u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ /* configure TXQ param */ if (hw->nic_type != athr_l2e_revB) { extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; if (hw->max_frame_size <= 1500) { jumbo_thresh = hw->max_frame_size + extra_size; } else if (hw->max_frame_size < 6*1024) { jumbo_thresh = (hw->max_frame_size + extra_size) * 2 / 3; } else { jumbo_thresh = (hw->max_frame_size + extra_size) / 2; } AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3); } dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL); max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) & DEVICE_CTRL_MAX_PAYLOAD_MASK; hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block); max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) & DEVICE_CTRL_MAX_RREQ_SZ_MASK; hw->dmar_block = 
min_t(u32, max_pay_load, hw->dmar_block); if (hw->nic_type != athr_l2e_revB) AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2, atl1e_pay_load_size[hw->dmar_block]); /* enable TXQ */ AT_WRITE_REGW(hw, REG_TXQ_CTRL, (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK) << TXQ_CTRL_NUM_TPD_BURST_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN); } static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) { struct atl1e_hw *hw = &adapter->hw; u32 rxf_len = 0; u32 rxf_low = 0; u32 rxf_high = 0; u32 rxf_thresh_data = 0; u32 rxq_ctrl_data = 0; if (hw->nic_type != athr_l2e_revB) { AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM, (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << RXQ_JMBOSZ_TH_SHIFT | (1 & RXQ_JMBO_LKAH_MASK) << RXQ_JMBO_LKAH_SHIFT)); rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN); rxf_high = rxf_len * 4 / 5; rxf_low = rxf_len / 5; rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data); } /* RRS */ AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu); if (hw->rrs_type & atl1e_rrs_ipv4) rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4; if (hw->rrs_type & atl1e_rrs_ipv4_tcp) rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP; if (hw->rrs_type & atl1e_rrs_ipv6) rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6; if (hw->rrs_type & atl1e_rrs_ipv6_tcp) rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP; if (hw->rrs_type != atl1e_rrs_disable) rxq_ctrl_data |= (RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT); rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 | RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); } static inline void atl1e_configure_dma(struct atl1e_adapter *adapter) { struct atl1e_hw *hw = &adapter->hw; u32 dma_ctrl_data = 0; dma_ctrl_data = DMA_CTRL_RXCMB_EN; dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) << DMA_CTRL_DMAR_BURST_LEN_SHIFT; dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) << DMA_CTRL_DMAW_BURST_LEN_SHIFT; dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER; dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK) << DMA_CTRL_DMAR_DLY_CNT_SHIFT; dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK) << DMA_CTRL_DMAW_DLY_CNT_SHIFT; AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); } static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter) { u32 value; struct atl1e_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; /* Config MAC CTRL Register */ value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN ; if (FULL_DUPLEX == adapter->link_duplex) value |= MAC_CTRL_DUPLX; value |= ((u32)((SPEED_1000 == adapter->link_speed) ? MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); __atl1e_vlan_mode(netdev->features, &value); value |= MAC_CTRL_BC_EN; if (netdev->flags & IFF_PROMISC) value |= MAC_CTRL_PROMIS_EN; if (netdev->flags & IFF_ALLMULTI) value |= MAC_CTRL_MC_ALL_EN; if (netdev->features & NETIF_F_RXALL) value |= MAC_CTRL_DBG; AT_WRITE_REG(hw, REG_MAC_CTRL, value); } /** * atl1e_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * * Configure the Tx /Rx unit of the MAC after a reset. 
*/ static int atl1e_configure(struct atl1e_adapter *adapter) { struct atl1e_hw *hw = &adapter->hw; u32 intr_status_data = 0; /* clear interrupt status */ AT_WRITE_REG(hw, REG_ISR, ~0); /* 1. set MAC Address */ atl1e_hw_set_mac_addr(hw); /* 2. Init the Multicast HASH table done by set_muti */ /* 3. Clear any WOL status */ AT_WRITE_REG(hw, REG_WOL_CTRL, 0); /* 4. Descripter Ring BaseMem/Length/Read ptr/Write ptr * TPD Ring/SMB/RXF0 Page CMBs, they use the same * High 32bits memory */ atl1e_configure_des_ring(adapter); /* 5. set Interrupt Moderator Timer */ AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt); AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt); AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE | MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN); /* 6. rx/tx threshold to trig interrupt */ AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh); AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh); AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down); AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down); /* 7. set Interrupt Clear Timer */ AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict); /* 8. set MTU */ AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); /* 9. config TXQ early tx threshold */ atl1e_configure_tx(adapter); /* 10. config RXQ */ atl1e_configure_rx(adapter); /* 11. config DMA Engine */ atl1e_configure_dma(adapter); /* 12. smb timer to trig interrupt */ AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer); intr_status_data = AT_READ_REG(hw, REG_ISR); if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) { netdev_err(adapter->netdev, "atl1e_configure failed, PCIE phy link down\n"); return -1; } AT_WRITE_REG(hw, REG_ISR, 0x7fffffff); return 0; } /** * atl1e_get_stats - Get System Network Statistics * @netdev: network interface device structure * * Returns the address of the device statistics structure. * The statistics are actually updated from the timer callback. 
*/ static struct net_device_stats *atl1e_get_stats(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw_stats *hw_stats = &adapter->hw_stats; struct net_device_stats *net_stats = &netdev->stats; net_stats->rx_bytes = hw_stats->rx_byte_cnt; net_stats->tx_bytes = hw_stats->tx_byte_cnt; net_stats->multicast = hw_stats->rx_mcast; net_stats->collisions = hw_stats->tx_1_col + hw_stats->tx_2_col + hw_stats->tx_late_col + hw_stats->tx_abort_col; net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + hw_stats->rx_len_err + hw_stats->rx_sz_ov + hw_stats->rx_rrd_ov + hw_stats->rx_align_err + hw_stats->rx_rxf_ov; net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; net_stats->rx_length_errors = hw_stats->rx_len_err; net_stats->rx_crc_errors = hw_stats->rx_fcs_err; net_stats->rx_frame_errors = hw_stats->rx_align_err; net_stats->rx_dropped = hw_stats->rx_rrd_ov; net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + hw_stats->tx_underrun + hw_stats->tx_trunc; net_stats->tx_fifo_errors = hw_stats->tx_underrun; net_stats->tx_aborted_errors = hw_stats->tx_abort_col; net_stats->tx_window_errors = hw_stats->tx_late_col; net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; return net_stats; } static void atl1e_update_hw_stats(struct atl1e_adapter *adapter) { u16 hw_reg_addr = 0; unsigned long *stats_item = NULL; /* update rx status */ hw_reg_addr = REG_MAC_RX_STATUS_BIN; stats_item = &adapter->hw_stats.rx_ok; while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); stats_item++; hw_reg_addr += 4; } /* update tx status */ hw_reg_addr = REG_MAC_TX_STATUS_BIN; stats_item = &adapter->hw_stats.tx_ok; while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); stats_item++; hw_reg_addr += 4; } } static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter) { u16 phy_data; spin_lock(&adapter->mdio_lock); atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data); spin_unlock(&adapter->mdio_lock); } static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; struct atl1e_tx_buffer *tx_buffer = NULL; u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX); u16 next_to_clean = atomic_read(&tx_ring->next_to_clean); while (next_to_clean != hw_next_to_clean) { tx_buffer = &tx_ring->tx_buffer[next_to_clean]; if (tx_buffer->dma) { if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) dma_unmap_single(&adapter->pdev->dev, tx_buffer->dma, tx_buffer->length, DMA_TO_DEVICE); else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma, tx_buffer->length, DMA_TO_DEVICE); tx_buffer->dma = 0; } if (tx_buffer->skb) { dev_consume_skb_irq(tx_buffer->skb); tx_buffer->skb = NULL; } if (++next_to_clean == tx_ring->count) next_to_clean = 0; } atomic_set(&tx_ring->next_to_clean, next_to_clean); if (netif_queue_stopped(adapter->netdev) && netif_carrier_ok(adapter->netdev)) { netif_wake_queue(adapter->netdev); } return true; } /** * atl1e_intr - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure */ static irqreturn_t atl1e_intr(int irq, void *data) { struct net_device *netdev = data; struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; int max_ints = AT_MAX_INT_WORK; int handled = IRQ_NONE; u32 status; do { status = 
AT_READ_REG(hw, REG_ISR); if ((status & IMR_NORMAL_MASK) == 0 || (status & ISR_DIS_INT) != 0) { if (max_ints != AT_MAX_INT_WORK) handled = IRQ_HANDLED; break; } /* link event */ if (status & ISR_GPHY) atl1e_clear_phy_int(adapter); /* Ack ISR */ AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); handled = IRQ_HANDLED; /* check if PCIE PHY Link down */ if (status & ISR_PHY_LINKDOWN) { netdev_err(adapter->netdev, "pcie phy linkdown %x\n", status); if (netif_running(adapter->netdev)) { /* reset MAC */ atl1e_irq_reset(adapter); schedule_work(&adapter->reset_task); break; } } /* check if DMA read/write error */ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { netdev_err(adapter->netdev, "PCIE DMA RW error (status = 0x%x)\n", status); atl1e_irq_reset(adapter); schedule_work(&adapter->reset_task); break; } if (status & ISR_SMB) atl1e_update_hw_stats(adapter); /* link event */ if (status & (ISR_GPHY | ISR_MANUAL)) { netdev->stats.tx_carrier_errors++; atl1e_link_chg_event(adapter); break; } /* transmit event */ if (status & ISR_TX_EVENT) atl1e_clean_tx_irq(adapter); if (status & ISR_RX_EVENT) { /* * disable rx interrupts, without * the synchronize_irq bit */ AT_WRITE_REG(hw, REG_IMR, IMR_NORMAL_MASK & ~ISR_RX_EVENT); AT_WRITE_FLUSH(hw); if (likely(napi_schedule_prep( &adapter->napi))) __napi_schedule(&adapter->napi); } } while (--max_ints > 0); /* re-enable Interrupt*/ AT_WRITE_REG(&adapter->hw, REG_ISR, 0); return handled; } static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter, struct sk_buff *skb, struct atl1e_recv_ret_status *prrs) { u8 *packet = (u8 *)(prrs + 1); struct iphdr *iph; u16 head_len = ETH_HLEN; u16 pkt_flags; u16 err_flags; skb_checksum_none_assert(skb); pkt_flags = prrs->pkt_flag; err_flags = prrs->err_flag; if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) && ((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) { if (pkt_flags & RRS_IS_IPV4) { if (pkt_flags & RRS_IS_802_3) head_len += 8; iph = (struct iphdr *) (packet + head_len); if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF)) goto hw_xsum; } if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) { skb->ip_summed = CHECKSUM_UNNECESSARY; return; } } hw_xsum : return; } static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter, u8 que) { struct atl1e_rx_page_desc *rx_page_desc = (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; u8 rx_using = rx_page_desc[que].rx_using; return &(rx_page_desc[que].rx_page[rx_using]); } static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, int *work_done, int work_to_do) { struct net_device *netdev = adapter->netdev; struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; struct atl1e_rx_page_desc *rx_page_desc = (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc; struct sk_buff *skb = NULL; struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que); u32 packet_size, write_offset; struct atl1e_recv_ret_status *prrs; write_offset = *(rx_page->write_offset_addr); if (likely(rx_page->read_offset < write_offset)) { do { if (*work_done >= work_to_do) break; (*work_done)++; /* get new packet's rrs */ prrs = (struct atl1e_recv_ret_status *) (rx_page->addr + rx_page->read_offset); /* check sequence number */ if (prrs->seq_num != rx_page_desc[que].rx_nxseq) { netdev_err(netdev, "rx sequence number error (rx=%d) (expect=%d)\n", prrs->seq_num, rx_page_desc[que].rx_nxseq); rx_page_desc[que].rx_nxseq++; /* just for debug use */ AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0, (((u32)prrs->seq_num) << 16) | rx_page_desc[que].rx_nxseq); 
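/* The RX page bookkeeping is now out of step with the hardware; give up on this page and let fatal_err below schedule a full reset. */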
goto fatal_err; } rx_page_desc[que].rx_nxseq++; /* error packet */ if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) && !(netdev->features & NETIF_F_RXALL)) { if (prrs->err_flag & (RRS_ERR_BAD_CRC | RRS_ERR_DRIBBLE | RRS_ERR_CODE | RRS_ERR_TRUNC)) { /* hardware error, discard this packet*/ netdev_err(netdev, "rx packet desc error %x\n", *((u32 *)prrs + 1)); goto skip_pkt; } } packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & RRS_PKT_SIZE_MASK); if (likely(!(netdev->features & NETIF_F_RXFCS))) packet_size -= 4; /* CRC */ skb = netdev_alloc_skb_ip_align(netdev, packet_size); if (skb == NULL) goto skip_pkt; memcpy(skb->data, (u8 *)(prrs + 1), packet_size); skb_put(skb, packet_size); skb->protocol = eth_type_trans(skb, netdev); atl1e_rx_checksum(adapter, skb, prrs); if (prrs->pkt_flag & RRS_IS_VLAN_TAG) { u16 vlan_tag = (prrs->vtag >> 4) | ((prrs->vtag & 7) << 13) | ((prrs->vtag & 8) << 9); netdev_dbg(netdev, "RXD VLAN TAG<RRD>=0x%04x\n", prrs->vtag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); } napi_gro_receive(&adapter->napi, skb); skip_pkt: /* skip current packet whether it's ok or not. */ rx_page->read_offset += (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & RRS_PKT_SIZE_MASK) + sizeof(struct atl1e_recv_ret_status) + 31) & 0xFFFFFFE0); if (rx_page->read_offset >= rx_ring->page_size) { /* mark this page clean */ u16 reg_addr; u8 rx_using; rx_page->read_offset = *(rx_page->write_offset_addr) = 0; rx_using = rx_page_desc[que].rx_using; reg_addr = atl1e_rx_page_vld_regs[que][rx_using]; AT_WRITE_REGB(&adapter->hw, reg_addr, 1); rx_page_desc[que].rx_using ^= 1; rx_page = atl1e_get_rx_page(adapter, que); } write_offset = *(rx_page->write_offset_addr); } while (rx_page->read_offset < write_offset); } return; fatal_err: if (!test_bit(__AT_DOWN, &adapter->flags)) schedule_work(&adapter->reset_task); } /** * atl1e_clean - NAPI Rx polling callback * @napi: napi info * @budget: number of packets to clean */ static int atl1e_clean(struct napi_struct *napi, int budget) { struct atl1e_adapter *adapter = container_of(napi, struct atl1e_adapter, napi); u32 imr_data; int work_done = 0; /* Keep link state information with original netdev */ if (!netif_carrier_ok(adapter->netdev)) goto quit_polling; atl1e_clean_rx_irq(adapter, 0, &work_done, budget); /* If no Tx and not enough Rx work done, exit the polling mode */ if (work_done < budget) { quit_polling: napi_complete_done(napi, work_done); imr_data = AT_READ_REG(&adapter->hw, REG_IMR); AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); /* test debug */ if (test_bit(__AT_DOWN, &adapter->flags)) { atomic_dec(&adapter->irq_sem); netdev_err(adapter->netdev, "atl1e_clean is called when AT_DOWN\n"); } /* reenable RX intr */ /*atl1e_irq_enable(adapter); */ } return work_done; } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ static void atl1e_netpoll(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); disable_irq(adapter->pdev->irq); atl1e_intr(adapter->pdev->irq, netdev); enable_irq(adapter->pdev->irq); } #endif static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; u16 next_to_use = 0; u16 next_to_clean = 0; next_to_clean = atomic_read(&tx_ring->next_to_clean); next_to_use = tx_ring->next_to_use; return (u16)(next_to_clean > next_to_use) ? 
(next_to_clean - next_to_use - 1) : (tx_ring->count + next_to_clean - next_to_use - 1); } /* * get next usable tpd * Note: should call atl1e_tdp_avail to make sure * there is enough tpd to use */ static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; u16 next_to_use = 0; next_to_use = tx_ring->next_to_use; if (++tx_ring->next_to_use == tx_ring->count) tx_ring->next_to_use = 0; memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); return &tx_ring->desc[next_to_use]; } static struct atl1e_tx_buffer * atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; return &tx_ring->tx_buffer[tpd - tx_ring->desc]; } /* Calculate the transmit packet descript needed*/ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb) { int i = 0; u16 tpd_req = 1; u16 fg_size = 0; u16 proto_hdr_len = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); } if (skb_is_gso(skb)) { if (skb->protocol == htons(ETH_P_IP) || (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) { proto_hdr_len = skb_tcp_all_headers(skb); if (proto_hdr_len < skb_headlen(skb)) { tpd_req += ((skb_headlen(skb) - proto_hdr_len + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); } } } return tpd_req; } static int atl1e_tso_csum(struct atl1e_adapter *adapter, struct sk_buff *skb, struct atl1e_tpd_desc *tpd) { unsigned short offload_type; u8 hdr_len; u32 real_len; if (skb_is_gso(skb)) { int err; err = skb_cow_head(skb, 0); if (err < 0) return err; offload_type = skb_shinfo(skb)->gso_type; if (offload_type & SKB_GSO_TCPV4) { real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + ntohs(ip_hdr(skb)->tot_len)); if (real_len < skb->len) { err = pskb_trim(skb, real_len); if (err) return err; } hdr_len = skb_tcp_all_headers(skb); if (unlikely(skb->len == hdr_len)) { /* only xsum need */ netdev_warn(adapter->netdev, "IPV4 tso with zero data??\n"); goto check_sum; } else { ip_hdr(skb)->check = 0; ip_hdr(skb)->tot_len = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic( ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); tpd->word3 |= (ip_hdr(skb)->ihl & TDP_V4_IPHL_MASK) << TPD_V4_IPHL_SHIFT; tpd->word3 |= ((tcp_hdrlen(skb) >> 2) & TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT; tpd->word3 |= ((skb_shinfo(skb)->gso_size) & TPD_MSS_MASK) << TPD_MSS_SHIFT; tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; } return 0; } } check_sum: if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { u8 css, cso; cso = skb_checksum_start_offset(skb); if (unlikely(cso & 0x1)) { netdev_err(adapter->netdev, "payload offset should not ant event number\n"); return -1; } else { css = cso + skb->csum_offset; tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) << TPD_PLOADOFFSET_SHIFT; tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) << TPD_CCSUMOFFSET_SHIFT; tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT; } } return 0; } static int atl1e_tx_map(struct atl1e_adapter *adapter, struct sk_buff *skb, struct atl1e_tpd_desc *tpd) { struct atl1e_tpd_desc *use_tpd = NULL; struct atl1e_tx_buffer *tx_buffer = NULL; u16 buf_len = skb_headlen(skb); u16 map_len = 0; u16 mapped_len = 0; u16 hdr_len = 0; u16 nr_frags; u16 f; int segment; int ring_start = adapter->tx_ring.next_to_use; int ring_end; nr_frags = skb_shinfo(skb)->nr_frags; segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; if (segment) { /* TSO */ hdr_len = skb_tcp_all_headers(skb); 
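/* TSO: the protocol headers (hdr_len bytes) get a mapping of their own first; the remaining linear data and the fragments are mapped in MAX_TX_BUF_LEN chunks below. */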
map_len = hdr_len; use_tpd = tpd; tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); tx_buffer->length = map_len; tx_buffer->dma = dma_map_single(&adapter->pdev->dev, skb->data, hdr_len, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) return -ENOSPC; ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | ((cpu_to_le32(tx_buffer->length) & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); } while (mapped_len < buf_len) { /* mapped_len == 0, means we should use the first tpd, which is given by caller */ if (mapped_len == 0) { use_tpd = tpd; } else { use_tpd = atl1e_get_tpd(adapter); memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); } tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); tx_buffer->skb = NULL; tx_buffer->length = map_len = ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : (buf_len - mapped_len); tx_buffer->dma = dma_map_single(&adapter->pdev->dev, skb->data + mapped_len, map_len, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { /* We need to unwind the mappings we've done */ ring_end = adapter->tx_ring.next_to_use; adapter->tx_ring.next_to_use = ring_start; while (adapter->tx_ring.next_to_use != ring_end) { tpd = atl1e_get_tpd(adapter); tx_buffer = atl1e_get_tx_buffer(adapter, tpd); dma_unmap_single(&adapter->pdev->dev, tx_buffer->dma, tx_buffer->length, DMA_TO_DEVICE); } /* Reset the tx rings next pointer */ adapter->tx_ring.next_to_use = ring_start; return -ENOSPC; } ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | ((cpu_to_le32(tx_buffer->length) & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); } for (f = 0; f < nr_frags; f++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; u16 i; u16 seg_num; buf_len = skb_frag_size(frag); seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; for (i = 0; i < seg_num; i++) { use_tpd = atl1e_get_tpd(adapter); memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); BUG_ON(tx_buffer->skb); tx_buffer->skb = NULL; tx_buffer->length = (buf_len > MAX_TX_BUF_LEN) ? 
MAX_TX_BUF_LEN : buf_len; buf_len -= tx_buffer->length; tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev, frag, (i * MAX_TX_BUF_LEN), tx_buffer->length, DMA_TO_DEVICE); if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { /* We need to unwind the mappings we've done */ ring_end = adapter->tx_ring.next_to_use; adapter->tx_ring.next_to_use = ring_start; while (adapter->tx_ring.next_to_use != ring_end) { tpd = atl1e_get_tpd(adapter); tx_buffer = atl1e_get_tx_buffer(adapter, tpd); dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma, tx_buffer->length, DMA_TO_DEVICE); } /* Reset the ring next to use pointer */ adapter->tx_ring.next_to_use = ring_start; return -ENOSPC; } ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE); use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | ((cpu_to_le32(tx_buffer->length) & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); } } if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK) /* note this one is a tcp header */ tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; /* The last tpd */ use_tpd->word3 |= 1 << TPD_EOP_SHIFT; /* The last buffer info contain the skb address, so it will be free after unmap */ tx_buffer->skb = skb; return 0; } static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count, struct atl1e_tpd_desc *tpd) { struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use); } static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); u16 tpd_req = 1; struct atl1e_tpd_desc *tpd; if (test_bit(__AT_DOWN, &adapter->flags)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (unlikely(skb->len <= 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } tpd_req = atl1e_cal_tdp_req(skb); if (atl1e_tpd_avail(adapter) < tpd_req) { /* no enough descriptor, just stop queue */ netif_stop_queue(netdev); return NETDEV_TX_BUSY; } tpd = atl1e_get_tpd(adapter); if (skb_vlan_tag_present(skb)) { u16 vlan_tag = skb_vlan_tag_get(skb); u16 atl1e_vlan_tag; tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag); tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) << TPD_VLAN_SHIFT; } if (skb->protocol == htons(ETH_P_8021Q)) tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT; if (skb_network_offset(skb) != ETH_HLEN) tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */ /* do TSO and check sum */ if (atl1e_tso_csum(adapter, skb, tpd) != 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (atl1e_tx_map(adapter, skb, tpd)) { dev_kfree_skb_any(skb); goto out; } atl1e_tx_queue(adapter, tpd_req, tpd); out: return NETDEV_TX_OK; } static void atl1e_free_irq(struct atl1e_adapter *adapter) { struct net_device *netdev = adapter->netdev; free_irq(adapter->pdev->irq, netdev); } static int atl1e_request_irq(struct atl1e_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; int err = 0; err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, netdev); if (err) { netdev_dbg(adapter->netdev, "Unable to allocate interrupt Error: %d\n", err); return err; } netdev_dbg(netdev, "atl1e_request_irq OK\n"); return err; } int atl1e_up(struct atl1e_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err = 0; u32 val; /* 
hardware has been reset, we need to reload some things */ err = atl1e_init_hw(&adapter->hw); if (err) { err = -EIO; return err; } atl1e_init_ring_ptrs(adapter); atl1e_set_multi(netdev); atl1e_restore_vlan(adapter); if (atl1e_configure(adapter)) { err = -EIO; goto err_up; } clear_bit(__AT_DOWN, &adapter->flags); napi_enable(&adapter->napi); atl1e_irq_enable(adapter); val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL); AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val | MASTER_CTRL_MANUAL_INT); err_up: return err; } void atl1e_down(struct atl1e_adapter *adapter) { struct net_device *netdev = adapter->netdev; /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__AT_DOWN, &adapter->flags); netif_stop_queue(netdev); /* reset MAC to disable all RX/TX */ atl1e_reset_hw(&adapter->hw); msleep(1); napi_disable(&adapter->napi); atl1e_del_timer(adapter); atl1e_irq_disable(adapter); netif_carrier_off(netdev); adapter->link_speed = SPEED_0; adapter->link_duplex = -1; atl1e_clean_tx_ring(adapter); atl1e_clean_rx_ring(adapter); } /** * atl1e_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. */ static int atl1e_open(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); int err; /* disallow open during test */ if (test_bit(__AT_TESTING, &adapter->flags)) return -EBUSY; /* allocate rx/tx dma buffer & descriptors */ atl1e_init_ring_resources(adapter); err = atl1e_setup_ring_resources(adapter); if (unlikely(err)) return err; err = atl1e_request_irq(adapter); if (unlikely(err)) goto err_req_irq; err = atl1e_up(adapter); if (unlikely(err)) goto err_up; return 0; err_up: atl1e_free_irq(adapter); err_req_irq: atl1e_free_ring_resources(adapter); atl1e_reset_hw(&adapter->hw); return err; } /** * atl1e_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. 
*/ static int atl1e_close(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); atl1e_down(adapter); atl1e_free_irq(adapter); atl1e_free_ring_resources(adapter); return 0; } static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 ctrl = 0; u32 mac_ctrl_data = 0; u32 wol_ctrl_data = 0; u16 mii_advertise_data = 0; u16 mii_bmsr_data = 0; u16 mii_intr_status_data = 0; u32 wufc = adapter->wol; u32 i; #ifdef CONFIG_PM int retval = 0; #endif if (netif_running(netdev)) { WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); atl1e_down(adapter); } netif_device_detach(netdev); #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) return retval; #endif if (wufc) { /* get link status */ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data); atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data); mii_advertise_data = ADVERTISE_10HALF; if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) || (atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_advertise_data) != 0) || (atl1e_phy_commit(hw)) != 0) { netdev_dbg(adapter->netdev, "set phy register failed\n"); goto wol_dis; } hw->phy_configured = false; /* re-init PHY when resume */ /* turn on magic packet wol */ if (wufc & AT_WUFC_MAG) wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; if (wufc & AT_WUFC_LNKC) { /* if orignal link status is link, just wait for retrive link */ if (mii_bmsr_data & BMSR_LSTATUS) { for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { msleep(100); atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data); if (mii_bmsr_data & BMSR_LSTATUS) break; } if ((mii_bmsr_data & BMSR_LSTATUS) == 0) netdev_dbg(adapter->netdev, "Link may change when suspend\n"); } wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; /* only link up can wake up */ if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) { netdev_dbg(adapter->netdev, "read write phy register failed\n"); goto wol_dis; } } /* clear phy interrupt */ atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data); /* Config MAC Ctrl register */ mac_ctrl_data = MAC_CTRL_RX_EN; /* set to 10/100M halt duplex */ mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT; mac_ctrl_data |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); __atl1e_vlan_mode(netdev->features, &mac_ctrl_data); /* magic packet maybe Broadcast&multicast&Unicast frame */ if (wufc & AT_WUFC_MAG) mac_ctrl_data |= MAC_CTRL_BC_EN; netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n", mac_ctrl_data); AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); /* pcie patch */ ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); goto suspend_exit; } wol_dis: /* WOL disabled */ AT_WRITE_REG(hw, REG_WOL_CTRL, 0); /* pcie patch */ ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); atl1e_force_ps(hw); hw->phy_configured = false; /* re-init PHY when resume */ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); suspend_exit: if (netif_running(netdev)) atl1e_free_irq(adapter); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } #ifdef CONFIG_PM static int atl1e_resume(struct pci_dev *pdev) { struct net_device *netdev = 
pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); u32 err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); err = pci_enable_device(pdev); if (err) { netdev_err(adapter->netdev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); if (netif_running(netdev)) { err = atl1e_request_irq(adapter); if (err) return err; } atl1e_reset_hw(&adapter->hw); if (netif_running(netdev)) atl1e_up(adapter); netif_device_attach(netdev); return 0; } #endif static void atl1e_shutdown(struct pci_dev *pdev) { atl1e_suspend(pdev, PMSG_SUSPEND); } static const struct net_device_ops atl1e_netdev_ops = { .ndo_open = atl1e_open, .ndo_stop = atl1e_close, .ndo_start_xmit = atl1e_xmit_frame, .ndo_get_stats = atl1e_get_stats, .ndo_set_rx_mode = atl1e_set_multi, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = atl1e_set_mac_addr, .ndo_fix_features = atl1e_fix_features, .ndo_set_features = atl1e_set_features, .ndo_change_mtu = atl1e_change_mtu, .ndo_eth_ioctl = atl1e_ioctl, .ndo_tx_timeout = atl1e_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl1e_netpoll, #endif }; static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) { SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); netdev->netdev_ops = &atl1e_netdev_ops; netdev->watchdog_timeo = AT_TX_WATCHDOG; /* MTU range: 42 - 8170 */ netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN); netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); atl1e_set_ethtool_ops(netdev); netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_RX; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX; /* not enabled by default */ netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS; return 0; } /** * atl1e_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl1e_pci_tbl * * Returns 0 on success, negative on failure * * atl1e_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. */ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl1e_adapter *adapter = NULL; static int cards_found; int err = 0; err = pci_enable_device(pdev); if (err) return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * The atl1e chip can DMA to 64-bit addresses, but it uses a single * shared register for the high 32 bits, so only a single, aligned, * 4 GB physical address range can be used at a time. * * Supporting 64-bit DMA on this hardware is more trouble than it's * worth. It is far easier to limit to 32-bit DMA than update * various kernel subsystems to support the mechanics required by a * fixed-high-32-bit system. 
*/ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); goto err_dma; } err = pci_request_regions(pdev, atl1e_driver_name); if (err) { dev_err(&pdev->dev, "cannot obtain PCI resources\n"); goto err_pci_reg; } pci_set_master(pdev); netdev = alloc_etherdev(sizeof(struct atl1e_adapter)); if (netdev == NULL) { err = -ENOMEM; goto err_alloc_etherdev; } err = atl1e_init_netdev(netdev, pdev); if (err) { netdev_err(netdev, "init netdevice failed\n"); goto err_init_netdev; } adapter = netdev_priv(netdev); adapter->bd_number = cards_found; adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.adapter = adapter; adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0); if (!adapter->hw.hw_addr) { err = -EIO; netdev_err(netdev, "cannot map device registers\n"); goto err_ioremap; } /* init mii data */ adapter->mii.dev = netdev; adapter->mii.mdio_read = atl1e_mdio_read; adapter->mii.mdio_write = atl1e_mdio_write; adapter->mii.phy_id_mask = 0x1f; adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; netif_napi_add(netdev, &adapter->napi, atl1e_clean); timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0); /* get user settings */ atl1e_check_options(adapter); /* * Mark all PCI regions associated with PCI device * pdev as being reserved by owner atl1e_driver_name * Enables bus-mastering on the device and calls * pcibios_set_master to do the needed arch specific settings */ atl1e_setup_pcicmd(pdev); /* setup the private structure */ err = atl1e_sw_init(adapter); if (err) { netdev_err(netdev, "net device private data init failed\n"); goto err_sw_init; } /* Init GPHY as early as possible due to power saving issue */ atl1e_phy_init(&adapter->hw); /* reset the controller to * put the device in a known good starting state */ err = atl1e_reset_hw(&adapter->hw); if (err) { err = -EIO; goto err_reset; } if (atl1e_read_mac_addr(&adapter->hw) != 0) { err = -EIO; netdev_err(netdev, "get mac address failed\n"); goto err_eeprom; } eth_hw_addr_set(netdev, adapter->hw.mac_addr); netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr); INIT_WORK(&adapter->reset_task, atl1e_reset_task); INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); netif_set_tso_max_size(netdev, MAX_TSO_SEG_SIZE); err = register_netdev(netdev); if (err) { netdev_err(netdev, "register netdevice failed\n"); goto err_register; } /* assume we have no link for now */ netif_stop_queue(netdev); netif_carrier_off(netdev); cards_found++; return 0; err_reset: err_register: err_sw_init: err_eeprom: pci_iounmap(pdev, adapter->hw.hw_addr); err_init_netdev: err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * atl1e_remove - Device Removal Routine * @pdev: PCI device information struct * * atl1e_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. 
*/ static void atl1e_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); /* * flush_scheduled work may reschedule our watchdog task, so * explicitly disable watchdog tasks from being rescheduled */ set_bit(__AT_DOWN, &adapter->flags); atl1e_del_timer(adapter); atl1e_cancel_work(adapter); unregister_netdev(netdev); atl1e_free_ring_resources(adapter); atl1e_force_ps(&adapter->hw); pci_iounmap(pdev, adapter->hw.hw_addr); pci_release_regions(pdev); free_netdev(netdev); pci_disable_device(pdev); } /** * atl1e_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. */ static pci_ers_result_t atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (netif_running(netdev)) atl1e_down(adapter); pci_disable_device(pdev); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * atl1e_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the e1000_resume routine. */ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); if (pci_enable_device(pdev)) { netdev_err(adapter->netdev, "Cannot re-enable PCI device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); atl1e_reset_hw(&adapter->hw); return PCI_ERS_RESULT_RECOVERED; } /** * atl1e_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the atl1e_resume routine. */ static void atl1e_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1e_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) { if (atl1e_up(adapter)) { netdev_err(adapter->netdev, "can't bring device back up after reset\n"); return; } } netif_device_attach(netdev); } static const struct pci_error_handlers atl1e_err_handler = { .error_detected = atl1e_io_error_detected, .slot_reset = atl1e_io_slot_reset, .resume = atl1e_io_resume, }; static struct pci_driver atl1e_driver = { .name = atl1e_driver_name, .id_table = atl1e_pci_tbl, .probe = atl1e_probe, .remove = atl1e_remove, /* Power Management Hooks */ #ifdef CONFIG_PM .suspend = atl1e_suspend, .resume = atl1e_resume, #endif .shutdown = atl1e_shutdown, .err_handler = &atl1e_err_handler }; module_pci_driver(atl1e_driver);
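/*
 * Illustrative sketch only -- not part of atl1e_main.c. It mirrors the
 * free-slot arithmetic of atl1e_tpd_avail() and the wrap-around of
 * atl1e_get_tpd() with a plain userspace ring, so the invariant (one
 * slot is always left unused to tell "full" from "empty") can be
 * exercised in isolation. The demo_* names, the ring size of 8 and the
 * loop in main() are made up for the sketch; build it as a standalone
 * program, not as kernel code.
 */
#include <assert.h>
#include <stdio.h>

struct demo_ring {
	unsigned int count;		/* total descriptors in the ring      */
	unsigned int next_to_use;	/* producer index (driver fills)      */
	unsigned int next_to_clean;	/* consumer index (hardware drained)  */
};

/* Same expression as atl1e_tpd_avail(): free slots, keeping one spare. */
static unsigned int demo_avail(const struct demo_ring *r)
{
	return (r->next_to_clean > r->next_to_use) ?
		(r->next_to_clean - r->next_to_use - 1) :
		(r->count + r->next_to_clean - r->next_to_use - 1);
}

/* Same wrap-around as atl1e_get_tpd(): hand out next_to_use, then advance. */
static unsigned int demo_get(struct demo_ring *r)
{
	unsigned int idx = r->next_to_use;

	if (++r->next_to_use == r->count)
		r->next_to_use = 0;
	return idx;
}

int main(void)
{
	struct demo_ring ring = { .count = 8 };
	unsigned int i;

	for (i = 0; i < 7; i++)
		printf("took slot %u, %u slot(s) still free\n",
		       demo_get(&ring), demo_avail(&ring));

	/* With count == 8 at most 7 slots are usable before the ring is "full". */
	assert(demo_avail(&ring) == 0);
	return 0;
}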
linux-master
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. */ #include <linux/netdevice.h> #include "atl1e.h" /* This is the only thing that needs to be changed to adjust the * maximum number of ports that the driver can manage. */ #define ATL1E_MAX_NIC 32 #define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 /* All parameters are treated the same, as an integer array of values. * This macro just reduces the need to repeat the same declaration code * over and over (plus this helps to avoid typo bugs). */ #define ATL1E_PARAM_INIT { [0 ... ATL1E_MAX_NIC] = OPTION_UNSET } #define ATL1E_PARAM(x, desc) \ static int x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \ static unsigned int num_##x; \ module_param_array_named(x, x, int, &num_##x, 0); \ MODULE_PARM_DESC(x, desc); /* Transmit Memory count * * Valid Range: 64-2048 * * Default Value: 128 */ #define ATL1E_MIN_TX_DESC_CNT 32 #define ATL1E_MAX_TX_DESC_CNT 1020 #define ATL1E_DEFAULT_TX_DESC_CNT 128 ATL1E_PARAM(tx_desc_cnt, "Transmit description count"); /* Receive Memory Block Count * * Valid Range: 16-512 * * Default Value: 128 */ #define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */ #define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */ #define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 128KB */ ATL1E_PARAM(rx_mem_size, "memory size of rx buffer(KB)"); /* User Specified MediaType Override * * Valid Range: 0-5 * - 0 - auto-negotiate at all supported speeds * - 1 - only link at 100Mbps Full Duplex * - 2 - only link at 100Mbps Half Duplex * - 3 - only link at 10Mbps Full Duplex * - 4 - only link at 10Mbps Half Duplex * Default Value: 0 */ ATL1E_PARAM(media_type, "MediaType Select"); /* Interrupt Moderate Timer in units of 2 us * * Valid Range: 10-65535 * * Default Value: 45000(90ms) */ #define INT_MOD_DEFAULT_CNT 100 /* 200us */ #define INT_MOD_MAX_CNT 65000 #define INT_MOD_MIN_CNT 50 ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer"); #define AUTONEG_ADV_DEFAULT 0x2F #define AUTONEG_ADV_MASK 0x2F #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL #define FLASH_VENDOR_DEFAULT 0 #define FLASH_VENDOR_MIN 0 #define FLASH_VENDOR_MAX 2 struct atl1e_option { enum { enable_option, range_option, list_option } type; char *name; char *err; int def; union { struct { /* range_option info */ int min; int max; } r; struct { /* list_option info */ int nr; struct atl1e_opt_list { int i; char *str; } *p; } l; } arg; }; static int atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter) { if (*value == OPTION_UNSET) { *value = opt->def; return 0; } switch (opt->type) { case enable_option: switch (*value) { case OPTION_ENABLED: netdev_info(adapter->netdev, "%s Enabled\n", opt->name); return 0; case OPTION_DISABLED: netdev_info(adapter->netdev, "%s Disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { netdev_info(adapter->netdev, "%s set to %i\n", opt->name, *value); return 0; } break; case list_option:{ int i; struct atl1e_opt_list *ent; for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') netdev_info(adapter->netdev, "%s\n", ent->str); return 0; } } break; } default: BUG(); } netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n", opt->name, *value, opt->err); *value = opt->def; return -1; } /** * atl1e_check_options - Range Checking for Command Line Parameters 
* @adapter: board private structure * * This routine checks all command line parameters for valid user * input. If an invalid value is given, or if no user specified * value exists, a default value is used. The final value is stored * in a variable in the adapter structure. */ void atl1e_check_options(struct atl1e_adapter *adapter) { int bd = adapter->bd_number; if (bd >= ATL1E_MAX_NIC) { netdev_notice(adapter->netdev, "no configuration for board #%i\n", bd); netdev_notice(adapter->netdev, "Using defaults for all values\n"); } { /* Transmit Ring Size */ struct atl1e_option opt = { .type = range_option, .name = "Transmit Ddescription Count", .err = "using default of " __MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT), .def = ATL1E_DEFAULT_TX_DESC_CNT, .arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT, .max = ATL1E_MAX_TX_DESC_CNT} } }; int val; if (num_tx_desc_cnt > bd) { val = tx_desc_cnt[bd]; atl1e_validate_option(&val, &opt, adapter); adapter->tx_ring.count = (u16) val & 0xFFFC; } else adapter->tx_ring.count = (u16)opt.def; } { /* Receive Memory Block Count */ struct atl1e_option opt = { .type = range_option, .name = "Memory size of rx buffer(KB)", .err = "using default of " __MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE), .def = ATL1E_DEFAULT_RX_MEM_SIZE, .arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE, .max = ATL1E_MAX_RX_MEM_SIZE} } }; int val; if (num_rx_mem_size > bd) { val = rx_mem_size[bd]; atl1e_validate_option(&val, &opt, adapter); adapter->rx_ring.page_size = (u32)val * 1024; } else { adapter->rx_ring.page_size = (u32)opt.def * 1024; } } { /* Interrupt Moderate Timer */ struct atl1e_option opt = { .type = range_option, .name = "Interrupt Moderate Timer", .err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT), .def = INT_MOD_DEFAULT_CNT, .arg = { .r = { .min = INT_MOD_MIN_CNT, .max = INT_MOD_MAX_CNT} } } ; int val; if (num_int_mod_timer > bd) { val = int_mod_timer[bd]; atl1e_validate_option(&val, &opt, adapter); adapter->hw.imt = (u16) val; } else adapter->hw.imt = (u16)(opt.def); } { /* MediaType */ struct atl1e_option opt = { .type = range_option, .name = "Speed/Duplex Selection", .err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR), .def = MEDIA_TYPE_AUTO_SENSOR, .arg = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR, .max = MEDIA_TYPE_10M_HALF} } } ; int val; if (num_media_type > bd) { val = media_type[bd]; atl1e_validate_option(&val, &opt, adapter); adapter->hw.media_type = (u16) val; } else adapter->hw.media_type = (u16)(opt.def); } }
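atl1e_check_options() above resolves each per-board parameter the same way: use the built-in default when the array slot was never set, accept the supplied value if it falls inside the option's range, otherwise warn and fall back to the default; the TX ring count is additionally masked down to a multiple of four. A standalone sketch of that validate-then-round flow follows, with hypothetical limits and names (validate_range() is not a kernel function).

/*
 * Standalone sketch of the "validate or fall back to default" flow used
 * for per-board module parameters.  Limits and names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define OPT_UNSET        -1
#define TX_DESC_MIN      32
#define TX_DESC_MAX      1020
#define TX_DESC_DEFAULT  128

/* Returns the value to use: the user's value if in range, else the default. */
static int validate_range(int value, int min, int max, int def, const char *name)
{
	if (value == OPT_UNSET)
		return def;
	if (value >= min && value <= max) {
		printf("%s set to %d\n", name, value);
		return value;
	}
	printf("Invalid %s (%d), using default of %d\n", name, value, def);
	return def;
}

int main(void)
{
	int requested[] = { OPT_UNSET, 256, 100000 };

	for (unsigned i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
		int val = validate_range(requested[i], TX_DESC_MIN, TX_DESC_MAX,
					 TX_DESC_DEFAULT, "tx descriptor count");
		/* The ring size is then rounded down to a multiple of 4,
		 * mirroring the "& 0xFFFC" mask in the code above. */
		uint16_t count = (uint16_t)val & 0xFFFC;
		printf("ring count used: %u\n", (unsigned)count);
	}
	return 0;
}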
linux-master
drivers/net/ethernet/atheros/atl1e/atl1e_param.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2010-2011 Calxeda, Inc. */ #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/kernel.h> #include <linux/circ_buf.h> #include <linux/interrupt.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/if.h> #include <linux/crc32.h> #include <linux/dma-mapping.h> #include <linux/slab.h> /* XGMAC Register definitions */ #define XGMAC_CONTROL 0x00000000 /* MAC Configuration */ #define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */ #define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */ #define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */ #define XGMAC_VERSION 0x00000020 /* Version */ #define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */ #define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */ #define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */ #define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */ #define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */ #define XGMAC_DEBUG 0x00000038 /* Debug */ #define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */ #define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8)) #define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8)) #define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */ #define XGMAC_NUM_HASH 16 #define XGMAC_OMR 0x00000400 #define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */ #define XGMAC_PMT 0x00000704 /* PMT Control and Status */ #define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */ #define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */ #define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */ #define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */ #define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */ /* Hardware TX Statistics Counters */ #define XGMAC_MMC_TXOCTET_GB_LO 0x00000814 #define XGMAC_MMC_TXOCTET_GB_HI 0x00000818 #define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C #define XGMAC_MMC_TXFRAME_GB_HI 0x00000820 #define XGMAC_MMC_TXBCFRAME_G 0x00000824 #define XGMAC_MMC_TXMCFRAME_G 0x0000082C #define XGMAC_MMC_TXUCFRAME_GB 0x00000864 #define XGMAC_MMC_TXMCFRAME_GB 0x0000086C #define XGMAC_MMC_TXBCFRAME_GB 0x00000874 #define XGMAC_MMC_TXUNDERFLOW 0x0000087C #define XGMAC_MMC_TXOCTET_G_LO 0x00000884 #define XGMAC_MMC_TXOCTET_G_HI 0x00000888 #define XGMAC_MMC_TXFRAME_G_LO 0x0000088C #define XGMAC_MMC_TXFRAME_G_HI 0x00000890 #define XGMAC_MMC_TXPAUSEFRAME 0x00000894 #define XGMAC_MMC_TXVLANFRAME 0x0000089C /* Hardware RX Statistics Counters */ #define XGMAC_MMC_RXFRAME_GB_LO 0x00000900 #define XGMAC_MMC_RXFRAME_GB_HI 0x00000904 #define XGMAC_MMC_RXOCTET_GB_LO 0x00000908 #define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C #define XGMAC_MMC_RXOCTET_G_LO 0x00000910 #define XGMAC_MMC_RXOCTET_G_HI 0x00000914 #define XGMAC_MMC_RXBCFRAME_G 0x00000918 #define XGMAC_MMC_RXMCFRAME_G 0x00000920 #define XGMAC_MMC_RXCRCERR 0x00000928 #define XGMAC_MMC_RXRUNT 0x00000930 #define XGMAC_MMC_RXJABBER 0x00000934 #define XGMAC_MMC_RXUCFRAME_G 0x00000970 #define XGMAC_MMC_RXLENGTHERR 0x00000978 #define XGMAC_MMC_RXPAUSEFRAME 0x00000988 #define XGMAC_MMC_RXOVERFLOW 0x00000990 #define XGMAC_MMC_RXVLANFRAME 0x00000998 #define XGMAC_MMC_RXWATCHDOG 0x000009a0 /* DMA Control and Status Registers */ #define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */ #define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */ #define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */ #define XGMAC_DMA_RX_BASE_ADDR 
0x00000f0c /* Receive List Base */ #define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */ #define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */ #define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */ #define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */ #define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */ #define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */ #define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */ #define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */ #define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ #define XGMAC_ADDR_AE 0x80000000 /* PMT Control and Status */ #define XGMAC_PMT_POINTER_RESET 0x80000000 #define XGMAC_PMT_GLBL_UNICAST 0x00000200 #define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040 #define XGMAC_PMT_MAGIC_PKT 0x00000020 #define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004 #define XGMAC_PMT_MAGIC_PKT_EN 0x00000002 #define XGMAC_PMT_POWERDOWN 0x00000001 #define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */ #define XGMAC_CONTROL_SPD_MASK 0x60000000 #define XGMAC_CONTROL_SPD_1G 0x60000000 #define XGMAC_CONTROL_SPD_2_5G 0x40000000 #define XGMAC_CONTROL_SPD_10G 0x00000000 #define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */ #define XGMAC_CONTROL_SARK_MASK 0x18000000 #define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */ #define XGMAC_CONTROL_CAR_MASK 0x06000000 #define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */ #define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */ #define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */ #define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ #define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ #define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ #define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */ #define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */ #define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ #define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ /* XGMAC Frame Filter defines */ #define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ #define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ #define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ #define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ #define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ #define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ #define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ #define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ #define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ #define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */ #define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */ #define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ /* XGMAC FLOW CTRL defines */ #define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ #define XGMAC_FLOW_CTRL_PT_SHIFT 16 #define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */ #define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */ #define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */ #define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */ #define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ #define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/ /* XGMAC_INT_STAT reg */ #define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */ #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ /* DMA Bus Mode register defines */ #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ #define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ /* Programmable burst length */ #define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ #define DMA_BUS_MODE_PBL_SHIFT 8 #define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ #define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ #define DMA_BUS_MODE_RPBL_SHIFT 17 #define DMA_BUS_MODE_USP 0x00800000 #define DMA_BUS_MODE_8PBL 0x01000000 #define DMA_BUS_MODE_AAL 0x02000000 /* DMA Bus Mode register defines */ #define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ #define DMA_BUS_PR_RATIO_SHIFT 14 #define DMA_BUS_FB 0x00010000 /* Fixed Burst */ /* DMA Control register defines */ #define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ #define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ #define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ #define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */ /* DMA Normal interrupt */ #define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ #define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ #define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ #define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ #define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ #define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ #define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ #define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ #define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ #define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ #define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ #define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ #define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */ #define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ #define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ #define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE) #define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \ DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \ DMA_INTR_ENA_TSE) /* DMA default interrupt mask */ #define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) /* DMA Status register defines */ #define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ #define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ #define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ #define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ #define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ #define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ #define DMA_STATUS_TS_SHIFT 20 #define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ #define DMA_STATUS_RS_SHIFT 17 #define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ #define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ #define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ #define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ #define DMA_STATUS_ETI 
0x00000400 /* Early Transmit Interrupt */ #define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ #define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ #define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ #define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ #define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ #define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ #define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ #define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */ #define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ /* Common MAC defines */ #define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ #define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ /* XGMAC Operation Mode Register */ #define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */ #define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */ #define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */ #define XGMAC_OMR_TTC_MASK 0x00030000 #define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */ #define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */ #define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */ #define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */ #define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */ #define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ #define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */ #define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ #define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshold Ctrl */ #define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */ /* XGMAC HW Features Register */ #define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */ #define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008 /* XGMAC Descriptor Defines */ #define MAX_DESC_BUF_SZ (0x2000 - 8) #define RXDESC_EXT_STATUS 0x00000001 #define RXDESC_CRC_ERR 0x00000002 #define RXDESC_RX_ERR 0x00000008 #define RXDESC_RX_WDOG 0x00000010 #define RXDESC_FRAME_TYPE 0x00000020 #define RXDESC_GIANT_FRAME 0x00000080 #define RXDESC_LAST_SEG 0x00000100 #define RXDESC_FIRST_SEG 0x00000200 #define RXDESC_VLAN_FRAME 0x00000400 #define RXDESC_OVERFLOW_ERR 0x00000800 #define RXDESC_LENGTH_ERR 0x00001000 #define RXDESC_SA_FILTER_FAIL 0x00002000 #define RXDESC_DESCRIPTOR_ERR 0x00004000 #define RXDESC_ERROR_SUMMARY 0x00008000 #define RXDESC_FRAME_LEN_OFFSET 16 #define RXDESC_FRAME_LEN_MASK 0x3fff0000 #define RXDESC_DA_FILTER_FAIL 0x40000000 #define RXDESC1_END_RING 0x00008000 #define RXDESC_IP_PAYLOAD_MASK 0x00000003 #define RXDESC_IP_PAYLOAD_UDP 0x00000001 #define RXDESC_IP_PAYLOAD_TCP 0x00000002 #define RXDESC_IP_PAYLOAD_ICMP 0x00000003 #define RXDESC_IP_HEADER_ERR 0x00000008 #define RXDESC_IP_PAYLOAD_ERR 0x00000010 #define RXDESC_IPV4_PACKET 0x00000040 #define RXDESC_IPV6_PACKET 0x00000080 #define TXDESC_UNDERFLOW_ERR 0x00000001 #define TXDESC_JABBER_TIMEOUT 0x00000002 #define TXDESC_LOCAL_FAULT 0x00000004 #define TXDESC_REMOTE_FAULT 0x00000008 #define TXDESC_VLAN_FRAME 0x00000010 #define TXDESC_FRAME_FLUSHED 0x00000020 #define TXDESC_IP_HEADER_ERR 0x00000040 #define TXDESC_PAYLOAD_CSUM_ERR 0x00000080 #define TXDESC_ERROR_SUMMARY 0x00008000 #define TXDESC_SA_CTRL_INSERT 0x00040000 #define TXDESC_SA_CTRL_REPLACE 0x00080000 #define TXDESC_2ND_ADDR_CHAINED 0x00100000 #define TXDESC_END_RING 0x00200000 #define TXDESC_CSUM_IP 0x00400000 #define TXDESC_CSUM_IP_PAYLD 0x00800000 #define TXDESC_CSUM_ALL 0x00C00000 #define 
TXDESC_CRC_EN_REPLACE 0x01000000 #define TXDESC_CRC_EN_APPEND 0x02000000 #define TXDESC_DISABLE_PAD 0x04000000 #define TXDESC_FIRST_SEG 0x10000000 #define TXDESC_LAST_SEG 0x20000000 #define TXDESC_INTERRUPT 0x40000000 #define DESC_OWN 0x80000000 #define DESC_BUFFER1_SZ_MASK 0x00001fff #define DESC_BUFFER2_SZ_MASK 0x1fff0000 #define DESC_BUFFER2_SZ_OFFSET 16 struct xgmac_dma_desc { __le32 flags; __le32 buf_size; __le32 buf1_addr; /* Buffer 1 Address Pointer */ __le32 buf2_addr; /* Buffer 2 Address Pointer */ __le32 ext_status; __le32 res[3]; }; struct xgmac_extra_stats { /* Transmit errors */ unsigned long tx_jabber; unsigned long tx_frame_flushed; unsigned long tx_payload_error; unsigned long tx_ip_header_error; unsigned long tx_local_fault; unsigned long tx_remote_fault; /* Receive errors */ unsigned long rx_watchdog; unsigned long rx_da_filter_fail; unsigned long rx_payload_error; unsigned long rx_ip_header_error; /* Tx/Rx IRQ errors */ unsigned long tx_process_stopped; unsigned long rx_buf_unav; unsigned long rx_process_stopped; unsigned long tx_early; unsigned long fatal_bus_error; }; struct xgmac_priv { struct xgmac_dma_desc *dma_rx; struct sk_buff **rx_skbuff; unsigned int rx_tail; unsigned int rx_head; struct xgmac_dma_desc *dma_tx; struct sk_buff **tx_skbuff; unsigned int tx_head; unsigned int tx_tail; int tx_irq_cnt; void __iomem *base; unsigned int dma_buf_sz; dma_addr_t dma_rx_phy; dma_addr_t dma_tx_phy; struct net_device *dev; struct device *device; struct napi_struct napi; int max_macs; struct xgmac_extra_stats xstats; spinlock_t stats_lock; int pmt_irq; char rx_pause; char tx_pause; int wolopts; struct work_struct tx_timeout_work; }; /* XGMAC Configuration Settings */ #define XGMAC_MAX_MTU 9000 #define PAUSE_TIME 0x400 #define DMA_RX_RING_SZ 256 #define DMA_TX_RING_SZ 128 /* minimum number of free TX descriptors required to wake up TX process */ #define TX_THRESH (DMA_TX_RING_SZ/4) /* DMA descriptor ring helpers */ #define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) #define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) #define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) #define tx_dma_ring_space(p) \ dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ) /* XGMAC Descriptor Access Helpers */ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) { if (buf_sz > MAX_DESC_BUF_SZ) p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ | (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET); else p->buf_size = cpu_to_le32(buf_sz); } static inline int desc_get_buf_len(struct xgmac_dma_desc *p) { u32 len = le32_to_cpu(p->buf_size); return (len & DESC_BUFFER1_SZ_MASK) + ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); } static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size, int buf_sz) { struct xgmac_dma_desc *end = p + ring_size - 1; memset(p, 0, sizeof(*p) * ring_size); for (; p <= end; p++) desc_set_buf_len(p, buf_sz); end->buf_size |= cpu_to_le32(RXDESC1_END_RING); } static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size) { memset(p, 0, sizeof(*p) * ring_size); p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING); } static inline int desc_get_owner(struct xgmac_dma_desc *p) { return le32_to_cpu(p->flags) & DESC_OWN; } static inline void desc_set_rx_owner(struct xgmac_dma_desc *p) { /* Clear all fields and set the owner */ p->flags = cpu_to_le32(DESC_OWN); } static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags) { u32 tmpflags = le32_to_cpu(p->flags); tmpflags &= TXDESC_END_RING; tmpflags |= flags | 
DESC_OWN; p->flags = cpu_to_le32(tmpflags); } static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p) { u32 tmpflags = le32_to_cpu(p->flags); tmpflags &= TXDESC_END_RING; p->flags = cpu_to_le32(tmpflags); } static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) { return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; } static inline int desc_get_tx_fs(struct xgmac_dma_desc *p) { return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG; } static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) { return le32_to_cpu(p->buf1_addr); } static inline void desc_set_buf_addr(struct xgmac_dma_desc *p, u32 paddr, int len) { p->buf1_addr = cpu_to_le32(paddr); if (len > MAX_DESC_BUF_SZ) p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ); } static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p, u32 paddr, int len) { desc_set_buf_len(p, len); desc_set_buf_addr(p, paddr, len); } static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p) { u32 data = le32_to_cpu(p->flags); u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET; if (data & RXDESC_FRAME_TYPE) len -= ETH_FCS_LEN; return len; } static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr) { int timeout = 1000; u32 reg = readl(ioaddr + XGMAC_OMR); writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR); while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF) udelay(1); } static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) { struct xgmac_extra_stats *x = &priv->xstats; u32 status = le32_to_cpu(p->flags); if (!(status & TXDESC_ERROR_SUMMARY)) return 0; netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status); if (status & TXDESC_JABBER_TIMEOUT) x->tx_jabber++; if (status & TXDESC_FRAME_FLUSHED) x->tx_frame_flushed++; if (status & TXDESC_UNDERFLOW_ERR) xgmac_dma_flush_tx_fifo(priv->base); if (status & TXDESC_IP_HEADER_ERR) x->tx_ip_header_error++; if (status & TXDESC_LOCAL_FAULT) x->tx_local_fault++; if (status & TXDESC_REMOTE_FAULT) x->tx_remote_fault++; if (status & TXDESC_PAYLOAD_CSUM_ERR) x->tx_payload_error++; return -1; } static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) { struct xgmac_extra_stats *x = &priv->xstats; int ret = CHECKSUM_UNNECESSARY; u32 status = le32_to_cpu(p->flags); u32 ext_status = le32_to_cpu(p->ext_status); if (status & RXDESC_DA_FILTER_FAIL) { netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n"); x->rx_da_filter_fail++; return -1; } /* All frames should fit into a single buffer */ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG)) return -1; /* Check if packet has checksum already */ if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && !(ext_status & RXDESC_IP_PAYLOAD_MASK)) ret = CHECKSUM_NONE; netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n", (status & RXDESC_FRAME_TYPE) ? 
1 : 0, ret, ext_status); if (!(status & RXDESC_ERROR_SUMMARY)) return ret; /* Handle any errors */ if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR | RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR)) return -1; if (status & RXDESC_EXT_STATUS) { if (ext_status & RXDESC_IP_HEADER_ERR) x->rx_ip_header_error++; if (ext_status & RXDESC_IP_PAYLOAD_ERR) x->rx_payload_error++; netdev_dbg(priv->dev, "IP checksum error - stat %08x\n", ext_status); return CHECKSUM_NONE; } return ret; } static inline void xgmac_mac_enable(void __iomem *ioaddr) { u32 value = readl(ioaddr + XGMAC_CONTROL); value |= MAC_ENABLE_RX | MAC_ENABLE_TX; writel(value, ioaddr + XGMAC_CONTROL); value = readl(ioaddr + XGMAC_DMA_CONTROL); value |= DMA_CONTROL_ST | DMA_CONTROL_SR; writel(value, ioaddr + XGMAC_DMA_CONTROL); } static inline void xgmac_mac_disable(void __iomem *ioaddr) { u32 value = readl(ioaddr + XGMAC_DMA_CONTROL); value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); writel(value, ioaddr + XGMAC_DMA_CONTROL); value = readl(ioaddr + XGMAC_CONTROL); value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); writel(value, ioaddr + XGMAC_CONTROL); } static void xgmac_set_mac_addr(void __iomem *ioaddr, const unsigned char *addr, int num) { u32 data; if (addr) { data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; writel(data, ioaddr + XGMAC_ADDR_LOW(num)); } else { writel(0, ioaddr + XGMAC_ADDR_HIGH(num)); writel(0, ioaddr + XGMAC_ADDR_LOW(num)); } } static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, int num) { u32 hi_addr, lo_addr; /* Read the MAC address from the hardware */ hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num)); lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num)); /* Extract the MAC address from the high and low words */ addr[0] = lo_addr & 0xff; addr[1] = (lo_addr >> 8) & 0xff; addr[2] = (lo_addr >> 16) & 0xff; addr[3] = (lo_addr >> 24) & 0xff; addr[4] = hi_addr & 0xff; addr[5] = (hi_addr >> 8) & 0xff; } static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx) { u32 reg; unsigned int flow = 0; priv->rx_pause = rx; priv->tx_pause = tx; if (rx || tx) { if (rx) flow |= XGMAC_FLOW_CTRL_RFE; if (tx) flow |= XGMAC_FLOW_CTRL_TFE; flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP; flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT); writel(flow, priv->base + XGMAC_FLOW_CTRL); reg = readl(priv->base + XGMAC_OMR); reg |= XGMAC_OMR_EFC; writel(reg, priv->base + XGMAC_OMR); } else { writel(0, priv->base + XGMAC_FLOW_CTRL); reg = readl(priv->base + XGMAC_OMR); reg &= ~XGMAC_OMR_EFC; writel(reg, priv->base + XGMAC_OMR); } return 0; } static void xgmac_rx_refill(struct xgmac_priv *priv) { struct xgmac_dma_desc *p; dma_addr_t paddr; int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN; while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { int entry = priv->rx_head; struct sk_buff *skb; p = priv->dma_rx + entry; if (priv->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb_ip_align(priv->dev, bufsz); if (unlikely(skb == NULL)) break; paddr = dma_map_single(priv->device, skb->data, priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); if (dma_mapping_error(priv->device, paddr)) { dev_kfree_skb_any(skb); break; } priv->rx_skbuff[entry] = skb; desc_set_buf_addr(p, paddr, priv->dma_buf_sz); } netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n", priv->rx_head, priv->rx_tail); priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ); desc_set_rx_owner(p); } } /** * 
xgmac_dma_desc_rings_init - init the RX/TX descriptor rings * @dev: net device structure * Description: this function initializes the DMA RX/TX descriptors * and allocates the socket buffers. */ static int xgmac_dma_desc_rings_init(struct net_device *dev) { struct xgmac_priv *priv = netdev_priv(dev); unsigned int bfsize; /* Set the Buffer size according to the MTU; * The total buffer size including any IP offset must be a multiple * of 8 bytes. */ bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8); netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); priv->rx_skbuff = kcalloc(DMA_RX_RING_SZ, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->rx_skbuff) return -ENOMEM; priv->dma_rx = dma_alloc_coherent(priv->device, DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), &priv->dma_rx_phy, GFP_KERNEL); if (!priv->dma_rx) goto err_dma_rx; priv->tx_skbuff = kcalloc(DMA_TX_RING_SZ, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->tx_skbuff) goto err_tx_skb; priv->dma_tx = dma_alloc_coherent(priv->device, DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc), &priv->dma_tx_phy, GFP_KERNEL); if (!priv->dma_tx) goto err_dma_tx; netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, " "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", priv->dma_rx, priv->dma_tx, (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); priv->rx_tail = 0; priv->rx_head = 0; priv->dma_buf_sz = bfsize; desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz); xgmac_rx_refill(priv); priv->tx_tail = 0; priv->tx_head = 0; desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR); return 0; err_dma_tx: kfree(priv->tx_skbuff); err_tx_skb: dma_free_coherent(priv->device, DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), priv->dma_rx, priv->dma_rx_phy); err_dma_rx: kfree(priv->rx_skbuff); return -ENOMEM; } static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) { int i; struct xgmac_dma_desc *p; if (!priv->rx_skbuff) return; for (i = 0; i < DMA_RX_RING_SZ; i++) { struct sk_buff *skb = priv->rx_skbuff[i]; if (skb == NULL) continue; p = priv->dma_rx + i; dma_unmap_single(priv->device, desc_get_buf_addr(p), priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); priv->rx_skbuff[i] = NULL; } } static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) { int i; struct xgmac_dma_desc *p; if (!priv->tx_skbuff) return; for (i = 0; i < DMA_TX_RING_SZ; i++) { if (priv->tx_skbuff[i] == NULL) continue; p = priv->dma_tx + i; if (desc_get_tx_fs(p)) dma_unmap_single(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); else dma_unmap_page(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); if (desc_get_tx_ls(p)) dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; } } static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv) { /* Release the DMA TX/RX socket buffers */ xgmac_free_rx_skbufs(priv); xgmac_free_tx_skbufs(priv); /* Free the consistent memory allocated for descriptor rings */ if (priv->dma_tx) { dma_free_coherent(priv->device, DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc), priv->dma_tx, priv->dma_tx_phy); priv->dma_tx = NULL; } if (priv->dma_rx) { dma_free_coherent(priv->device, DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), priv->dma_rx, priv->dma_rx_phy); priv->dma_rx = NULL; } kfree(priv->rx_skbuff); priv->rx_skbuff = NULL; kfree(priv->tx_skbuff); priv->tx_skbuff = NULL; } /** * xgmac_tx_complete: * 
@priv: private driver structure * Description: it reclaims resources after transmission completes. */ static void xgmac_tx_complete(struct xgmac_priv *priv) { while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { unsigned int entry = priv->tx_tail; struct sk_buff *skb = priv->tx_skbuff[entry]; struct xgmac_dma_desc *p = priv->dma_tx + entry; /* Check if the descriptor is owned by the DMA. */ if (desc_get_owner(p)) break; netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", priv->tx_head, priv->tx_tail); if (desc_get_tx_fs(p)) dma_unmap_single(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); else dma_unmap_page(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); /* Check tx error on the last segment */ if (desc_get_tx_ls(p)) { desc_get_tx_status(priv, p); dev_consume_skb_any(skb); } priv->tx_skbuff[entry] = NULL; priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); } /* Ensure tx_tail is visible to xgmac_xmit */ smp_mb(); if (unlikely(netif_queue_stopped(priv->dev) && (tx_dma_ring_space(priv) > MAX_SKB_FRAGS))) netif_wake_queue(priv->dev); } static void xgmac_tx_timeout_work(struct work_struct *work) { u32 reg, value; struct xgmac_priv *priv = container_of(work, struct xgmac_priv, tx_timeout_work); napi_disable(&priv->napi); writel(0, priv->base + XGMAC_DMA_INTR_ENA); netif_tx_lock(priv->dev); reg = readl(priv->base + XGMAC_DMA_CONTROL); writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); do { value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000; } while (value && (value != 0x600000)); xgmac_free_tx_skbufs(priv); desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); priv->tx_tail = 0; priv->tx_head = 0; writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, priv->base + XGMAC_DMA_STATUS); netif_tx_unlock(priv->dev); netif_wake_queue(priv->dev); napi_enable(&priv->napi); /* Enable interrupts */ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS); writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } static int xgmac_hw_init(struct net_device *dev) { u32 value, ctrl; int limit; struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; /* Save the ctrl register value */ ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK; /* SW reset */ value = DMA_BUS_MODE_SFT_RESET; writel(value, ioaddr + XGMAC_DMA_BUS_MODE); limit = 15000; while (limit-- && (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) cpu_relax(); if (limit < 0) return -EBUSY; value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) | (0x10 << DMA_BUS_MODE_RPBL_SHIFT) | DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; writel(value, ioaddr + XGMAC_DMA_BUS_MODE); writel(0, ioaddr + XGMAC_DMA_INTR_ENA); /* Mask power mgt interrupt */ writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); /* XGMAC requires AXI bus init. 
This is a 'magic number' for now */ writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS | XGMAC_CONTROL_CAR; if (dev->features & NETIF_F_RXCSUM) ctrl |= XGMAC_CONTROL_IPC; writel(ctrl, ioaddr + XGMAC_CONTROL); writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL); /* Set the HW DMA mode and the COE */ writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA | XGMAC_OMR_RTC_256, ioaddr + XGMAC_OMR); /* Reset the MMC counters */ writel(1, ioaddr + XGMAC_MMC_CTRL); return 0; } /** * xgmac_open - open entry point of the driver * @dev : pointer to the device structure. * Description: * This function is the open entry point of the driver. * Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. */ static int xgmac_open(struct net_device *dev) { int ret; struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; /* Check that the MAC address is valid. If its not, refuse * to bring the device up. The user must specify an * address using the following linux command: * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ if (!is_valid_ether_addr(dev->dev_addr)) { eth_hw_addr_random(dev); netdev_dbg(priv->dev, "generated random MAC address %pM\n", dev->dev_addr); } memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); /* Initialize the XGMAC and descriptors */ xgmac_hw_init(dev); xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause); ret = xgmac_dma_desc_rings_init(dev); if (ret < 0) return ret; /* Enable the MAC Rx/Tx */ xgmac_mac_enable(ioaddr); napi_enable(&priv->napi); netif_start_queue(dev); /* Enable interrupts */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); return 0; } /** * xgmac_stop - close entry point of the driver * @dev : device pointer. * Description: * This is the stop entry point of the driver. */ static int xgmac_stop(struct net_device *dev) { struct xgmac_priv *priv = netdev_priv(dev); if (readl(priv->base + XGMAC_DMA_INTR_ENA)) napi_disable(&priv->napi); writel(0, priv->base + XGMAC_DMA_INTR_ENA); netif_tx_disable(dev); /* Disable the MAC core */ xgmac_mac_disable(priv->base); /* Release and free the Rx/Tx resources */ xgmac_free_dma_desc_rings(priv); return 0; } /** * xgmac_xmit: * @skb : the socket buffer * @dev : device pointer * Description : Tx entry point of the driver. */ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) { struct xgmac_priv *priv = netdev_priv(dev); unsigned int entry; int i; u32 irq_flag; int nfrags = skb_shinfo(skb)->nr_frags; struct xgmac_dma_desc *desc, *first; unsigned int desc_flags; unsigned int len; dma_addr_t paddr; priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1); irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT; desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? 
TXDESC_CSUM_ALL : 0; entry = priv->tx_head; desc = priv->dma_tx + entry; first = desc; len = skb_headlen(skb); paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, paddr)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } priv->tx_skbuff[entry] = skb; desc_set_buf_addr_and_size(desc, paddr, len); for (i = 0; i < nfrags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; len = skb_frag_size(frag); paddr = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, paddr)) goto dma_err; entry = dma_ring_incr(entry, DMA_TX_RING_SZ); desc = priv->dma_tx + entry; priv->tx_skbuff[entry] = skb; desc_set_buf_addr_and_size(desc, paddr, len); if (i < (nfrags - 1)) desc_set_tx_owner(desc, desc_flags); } /* Interrupt on completition only for the latest segment */ if (desc != first) desc_set_tx_owner(desc, desc_flags | TXDESC_LAST_SEG | irq_flag); else desc_flags |= TXDESC_LAST_SEG | irq_flag; /* Set owner on first desc last to avoid race condition */ wmb(); desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); writel(1, priv->base + XGMAC_DMA_TX_POLL); priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); /* Ensure tx_head update is visible to tx completion */ smp_mb(); if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) { netif_stop_queue(dev); /* Ensure netif_stop_queue is visible to tx completion */ smp_mb(); if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS) netif_start_queue(dev); } return NETDEV_TX_OK; dma_err: entry = priv->tx_head; for ( ; i > 0; i--) { entry = dma_ring_incr(entry, DMA_TX_RING_SZ); desc = priv->dma_tx + entry; priv->tx_skbuff[entry] = NULL; dma_unmap_page(priv->device, desc_get_buf_addr(desc), desc_get_buf_len(desc), DMA_TO_DEVICE); desc_clear_tx_owner(desc); } desc = first; dma_unmap_single(priv->device, desc_get_buf_addr(desc), desc_get_buf_len(desc), DMA_TO_DEVICE); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static int xgmac_rx(struct xgmac_priv *priv, int limit) { unsigned int entry; unsigned int count = 0; struct xgmac_dma_desc *p; while (count < limit) { int ip_checksum; struct sk_buff *skb; int frame_len; if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) break; entry = priv->rx_tail; p = priv->dma_rx + entry; if (desc_get_owner(p)) break; count++; priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ); /* read the status of the incoming frame */ ip_checksum = desc_get_rx_status(priv, p); if (ip_checksum < 0) continue; skb = priv->rx_skbuff[entry]; if (unlikely(!skb)) { netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n"); break; } priv->rx_skbuff[entry] = NULL; frame_len = desc_get_rx_frame_len(p); netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n", frame_len, ip_checksum); skb_put(skb, frame_len); dma_unmap_single(priv->device, desc_get_buf_addr(p), priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); skb->protocol = eth_type_trans(skb, priv->dev); skb->ip_summed = ip_checksum; if (ip_checksum == CHECKSUM_NONE) netif_receive_skb(skb); else napi_gro_receive(&priv->napi, skb); } xgmac_rx_refill(priv); return count; } /** * xgmac_poll - xgmac poll method (NAPI) * @napi : pointer to the napi structure. * @budget : maximum number of packets that the current CPU can receive from * all interfaces. * Description : * This function implements the reception process. 
* Also it runs the TX completion thread */ static int xgmac_poll(struct napi_struct *napi, int budget) { struct xgmac_priv *priv = container_of(napi, struct xgmac_priv, napi); int work_done = 0; xgmac_tx_complete(priv); work_done = xgmac_rx(priv, budget); if (work_done < budget) { napi_complete_done(napi, work_done); __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } return work_done; } /** * xgmac_tx_timeout * @dev : Pointer to net device structure * @txqueue: index of the hung transmit queue * * Description: this function is called when a packet transmission fails to * complete within a reasonable tmrate. The driver will mark the error in the * netdev structure and arrange for the device to be reset to a sane state * in order to transmit a new packet. */ static void xgmac_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct xgmac_priv *priv = netdev_priv(dev); schedule_work(&priv->tx_timeout_work); } /** * xgmac_set_rx_mode - entry point for multicast addressing * @dev : pointer to the device structure * Description: * This function is a driver entry point which gets called by the kernel * whenever multicast addresses must be enabled/disabled. * Return value: * void. */ static void xgmac_set_rx_mode(struct net_device *dev) { int i; struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; unsigned int value = 0; u32 hash_filter[XGMAC_NUM_HASH]; int reg = 1; struct netdev_hw_addr *ha; bool use_hash = false; netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n", netdev_mc_count(dev), netdev_uc_count(dev)); if (dev->flags & IFF_PROMISC) value |= XGMAC_FRAME_FILTER_PR; memset(hash_filter, 0, sizeof(hash_filter)); if (netdev_uc_count(dev) > priv->max_macs) { use_hash = true; value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF; } netdev_for_each_uc_addr(ha, dev) { if (use_hash) { u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; /* The most significant 4 bits determine the register to * use (H/L) while the other 5 bits determine the bit * within the register. */ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } else { xgmac_set_mac_addr(ioaddr, ha->addr, reg); reg++; } } if (dev->flags & IFF_ALLMULTI) { value |= XGMAC_FRAME_FILTER_PM; goto out; } if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) { use_hash = true; value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; } else { use_hash = false; } netdev_for_each_mc_addr(ha, dev) { if (use_hash) { u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23; /* The most significant 4 bits determine the register to * use (H/L) while the other 5 bits determine the bit * within the register. */ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } else { xgmac_set_mac_addr(ioaddr, ha->addr, reg); reg++; } } out: for (i = reg; i <= priv->max_macs; i++) xgmac_set_mac_addr(ioaddr, NULL, i); for (i = 0; i < XGMAC_NUM_HASH; i++) writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); writel(value, ioaddr + XGMAC_FRAME_FILTER); } /** * xgmac_change_mtu - entry point to change MTU size for the device. * @dev : device pointer. * @new_mtu : the new MTU size for the device. * Description: the Maximum Transfer Unit (MTU) is used by the network layer * to drive packet transmission. Ethernet has an MTU of 1500 octets * (ETH_DATA_LEN). This value can be changed with ifconfig. * Return value: * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. 
*/ static int xgmac_change_mtu(struct net_device *dev, int new_mtu) { /* Stop everything, get ready to change the MTU */ if (!netif_running(dev)) return 0; /* Bring interface down, change mtu and bring interface back up */ xgmac_stop(dev); dev->mtu = new_mtu; return xgmac_open(dev); } static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) { u32 intr_status; struct net_device *dev = (struct net_device *)dev_id; struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT); if (intr_status & XGMAC_INT_STAT_PMT) { netdev_dbg(priv->dev, "received Magic frame\n"); /* clear the PMT bits 5 and 6 by reading the PMT */ readl(ioaddr + XGMAC_PMT); } return IRQ_HANDLED; } static irqreturn_t xgmac_interrupt(int irq, void *dev_id) { u32 intr_status; struct net_device *dev = (struct net_device *)dev_id; struct xgmac_priv *priv = netdev_priv(dev); struct xgmac_extra_stats *x = &priv->xstats; /* read the status register (CSR5) */ intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS); intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA); __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS); /* It displays the DMA process states (CSR5 register) */ /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_STATUS_AIS)) { if (intr_status & DMA_STATUS_TJT) { netdev_err(priv->dev, "transmit jabber\n"); x->tx_jabber++; } if (intr_status & DMA_STATUS_RU) x->rx_buf_unav++; if (intr_status & DMA_STATUS_RPS) { netdev_err(priv->dev, "receive process stopped\n"); x->rx_process_stopped++; } if (intr_status & DMA_STATUS_ETI) { netdev_err(priv->dev, "transmit early interrupt\n"); x->tx_early++; } if (intr_status & DMA_STATUS_TPS) { netdev_err(priv->dev, "transmit process stopped\n"); x->tx_process_stopped++; schedule_work(&priv->tx_timeout_work); } if (intr_status & DMA_STATUS_FBI) { netdev_err(priv->dev, "fatal bus error\n"); x->fatal_bus_error++; } } /* TX/RX NORMAL interrupts */ if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) { __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); napi_schedule(&priv->napi); } return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER /* Polling receive - used by NETCONSOLE and other diagnostic tools * to allow network I/O with interrupts disabled. 
*/ static void xgmac_poll_controller(struct net_device *dev) { disable_irq(dev->irq); xgmac_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif static void xgmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) { struct xgmac_priv *priv = netdev_priv(dev); void __iomem *base = priv->base; u32 count; spin_lock_bh(&priv->stats_lock); writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL); storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO); storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32; storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO); storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G); storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR); storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR); storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW); storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO); storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32; count = readl(base + XGMAC_MMC_TXFRAME_GB_LO); storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO); storage->tx_packets = count; storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW); writel(0, base + XGMAC_MMC_CTRL); spin_unlock_bh(&priv->stats_lock); } static int xgmac_set_mac_address(struct net_device *dev, void *p) { struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(dev, addr->sa_data); xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); return 0; } static int xgmac_set_features(struct net_device *dev, netdev_features_t features) { u32 ctrl; struct xgmac_priv *priv = netdev_priv(dev); void __iomem *ioaddr = priv->base; netdev_features_t changed = dev->features ^ features; if (!(changed & NETIF_F_RXCSUM)) return 0; ctrl = readl(ioaddr + XGMAC_CONTROL); if (features & NETIF_F_RXCSUM) ctrl |= XGMAC_CONTROL_IPC; else ctrl &= ~XGMAC_CONTROL_IPC; writel(ctrl, ioaddr + XGMAC_CONTROL); return 0; } static const struct net_device_ops xgmac_netdev_ops = { .ndo_open = xgmac_open, .ndo_start_xmit = xgmac_xmit, .ndo_stop = xgmac_stop, .ndo_change_mtu = xgmac_change_mtu, .ndo_set_rx_mode = xgmac_set_rx_mode, .ndo_tx_timeout = xgmac_tx_timeout, .ndo_get_stats64 = xgmac_get_stats64, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xgmac_poll_controller, #endif .ndo_set_mac_address = xgmac_set_mac_address, .ndo_set_features = xgmac_set_features, }; static int xgmac_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { cmd->base.autoneg = 0; cmd->base.duplex = DUPLEX_FULL; cmd->base.speed = 10000; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 0); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 0); return 0; } static void xgmac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct xgmac_priv *priv = netdev_priv(netdev); pause->rx_pause = priv->rx_pause; pause->tx_pause = priv->tx_pause; } static int xgmac_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct xgmac_priv *priv = netdev_priv(netdev); if (pause->autoneg) return -EINVAL; return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause); } struct xgmac_stats { char stat_string[ETH_GSTRING_LEN]; int stat_offset; bool is_reg; }; #define XGMAC_STAT(m) \ { #m, offsetof(struct xgmac_priv, xstats.m), false } #define XGMAC_HW_STAT(m, reg_offset) \ { #m, reg_offset, true } static const struct 
xgmac_stats xgmac_gstrings_stats[] = { XGMAC_STAT(tx_frame_flushed), XGMAC_STAT(tx_payload_error), XGMAC_STAT(tx_ip_header_error), XGMAC_STAT(tx_local_fault), XGMAC_STAT(tx_remote_fault), XGMAC_STAT(tx_early), XGMAC_STAT(tx_process_stopped), XGMAC_STAT(tx_jabber), XGMAC_STAT(rx_buf_unav), XGMAC_STAT(rx_process_stopped), XGMAC_STAT(rx_payload_error), XGMAC_STAT(rx_ip_header_error), XGMAC_STAT(rx_da_filter_fail), XGMAC_STAT(fatal_bus_error), XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME), XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME), XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME), }; #define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats) static void xgmac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *data) { struct xgmac_priv *priv = netdev_priv(dev); void *p = priv; int i; for (i = 0; i < XGMAC_STATS_LEN; i++) { if (xgmac_gstrings_stats[i].is_reg) *data++ = readl(priv->base + xgmac_gstrings_stats[i].stat_offset); else *data++ = *(u32 *)(p + xgmac_gstrings_stats[i].stat_offset); } } static int xgmac_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: return XGMAC_STATS_LEN; default: return -EINVAL; } } static void xgmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) { int i; u8 *p = data; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < XGMAC_STATS_LEN; i++) { memcpy(p, xgmac_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } break; default: WARN_ON(1); break; } } static void xgmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct xgmac_priv *priv = netdev_priv(dev); if (device_can_wakeup(priv->device)) { wol->supported = WAKE_MAGIC | WAKE_UCAST; wol->wolopts = priv->wolopts; } } static int xgmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct xgmac_priv *priv = netdev_priv(dev); u32 support = WAKE_MAGIC | WAKE_UCAST; if (!device_can_wakeup(priv->device)) return -ENOTSUPP; if (wol->wolopts & ~support) return -EINVAL; priv->wolopts = wol->wolopts; if (wol->wolopts) { device_set_wakeup_enable(priv->device, 1); enable_irq_wake(dev->irq); } else { device_set_wakeup_enable(priv->device, 0); disable_irq_wake(dev->irq); } return 0; } static const struct ethtool_ops xgmac_ethtool_ops = { .get_link = ethtool_op_get_link, .get_pauseparam = xgmac_get_pauseparam, .set_pauseparam = xgmac_set_pauseparam, .get_ethtool_stats = xgmac_get_ethtool_stats, .get_strings = xgmac_get_strings, .get_wol = xgmac_get_wol, .set_wol = xgmac_set_wol, .get_sset_count = xgmac_get_sset_count, .get_link_ksettings = xgmac_ethtool_get_link_ksettings, }; /** * xgmac_probe * @pdev: platform device pointer * Description: the driver is initialized through platform_device. 
*/ static int xgmac_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; struct net_device *ndev = NULL; struct xgmac_priv *priv = NULL; u8 addr[ETH_ALEN]; u32 uid; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; if (!request_mem_region(res->start, resource_size(res), pdev->name)) return -EBUSY; ndev = alloc_etherdev(sizeof(struct xgmac_priv)); if (!ndev) { ret = -ENOMEM; goto err_alloc; } SET_NETDEV_DEV(ndev, &pdev->dev); priv = netdev_priv(ndev); platform_set_drvdata(pdev, ndev); ndev->netdev_ops = &xgmac_netdev_ops; ndev->ethtool_ops = &xgmac_ethtool_ops; spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work); priv->device = &pdev->dev; priv->dev = ndev; priv->rx_pause = 1; priv->tx_pause = 1; priv->base = ioremap(res->start, resource_size(res)); if (!priv->base) { netdev_err(ndev, "ioremap failed\n"); ret = -ENOMEM; goto err_io; } uid = readl(priv->base + XGMAC_VERSION); netdev_info(ndev, "h/w version is 0x%x\n", uid); /* Figure out how many valid mac address filter registers we have */ writel(1, priv->base + XGMAC_ADDR_HIGH(31)); if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1) priv->max_macs = 31; else priv->max_macs = 7; writel(0, priv->base + XGMAC_DMA_INTR_ENA); ndev->irq = platform_get_irq(pdev, 0); if (ndev->irq == -ENXIO) { netdev_err(ndev, "No irq resource\n"); ret = ndev->irq; goto err_irq; } ret = request_irq(ndev->irq, xgmac_interrupt, 0, dev_name(&pdev->dev), ndev); if (ret < 0) { netdev_err(ndev, "Could not request irq %d - ret %d)\n", ndev->irq, ret); goto err_irq; } priv->pmt_irq = platform_get_irq(pdev, 1); if (priv->pmt_irq == -ENXIO) { netdev_err(ndev, "No pmt irq resource\n"); ret = priv->pmt_irq; goto err_pmt_irq; } ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0, dev_name(&pdev->dev), ndev); if (ret < 0) { netdev_err(ndev, "Could not request irq %d - ret %d)\n", priv->pmt_irq, ret); goto err_pmt_irq; } device_set_wakeup_capable(&pdev->dev, 1); if (device_can_wakeup(priv->device)) priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ndev->features |= ndev->hw_features; ndev->priv_flags |= IFF_UNICAST_FLT; /* MTU range: 46 - 9000 */ ndev->min_mtu = ETH_ZLEN - ETH_HLEN; ndev->max_mtu = XGMAC_MAX_MTU; /* Get the MAC address */ xgmac_get_mac_addr(priv->base, addr, 0); eth_hw_addr_set(ndev, addr); if (!is_valid_ether_addr(ndev->dev_addr)) netdev_warn(ndev, "MAC address %pM not valid", ndev->dev_addr); netif_napi_add(ndev, &priv->napi, xgmac_poll); ret = register_netdev(ndev); if (ret) goto err_reg; return 0; err_reg: netif_napi_del(&priv->napi); free_irq(priv->pmt_irq, ndev); err_pmt_irq: free_irq(ndev->irq, ndev); err_irq: iounmap(priv->base); err_io: free_netdev(ndev); err_alloc: release_mem_region(res->start, resource_size(res)); return ret; } /** * xgmac_remove * @pdev: platform device pointer * Description: this function resets the TX/RX processes, disables the MAC RX/TX * changes the link status, releases the DMA descriptor rings, * unregisters the MDIO bus and unmaps the allocated memory. 
*/ static int xgmac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct xgmac_priv *priv = netdev_priv(ndev); struct resource *res; xgmac_mac_disable(priv->base); /* Free the IRQ lines */ free_irq(ndev->irq, ndev); free_irq(priv->pmt_irq, ndev); unregister_netdev(ndev); netif_napi_del(&priv->napi); iounmap(priv->base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); free_netdev(ndev); return 0; } #ifdef CONFIG_PM_SLEEP static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) { unsigned int pmt = 0; if (mode & WAKE_MAGIC) pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN; if (mode & WAKE_UCAST) pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; writel(pmt, ioaddr + XGMAC_PMT); } static int xgmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct xgmac_priv *priv = netdev_priv(ndev); u32 value; if (!ndev || !netif_running(ndev)) return 0; netif_device_detach(ndev); napi_disable(&priv->napi); writel(0, priv->base + XGMAC_DMA_INTR_ENA); if (device_may_wakeup(priv->device)) { /* Stop TX/RX DMA Only */ value = readl(priv->base + XGMAC_DMA_CONTROL); value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); writel(value, priv->base + XGMAC_DMA_CONTROL); xgmac_pmt(priv->base, priv->wolopts); } else xgmac_mac_disable(priv->base); return 0; } static int xgmac_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct xgmac_priv *priv = netdev_priv(ndev); void __iomem *ioaddr = priv->base; if (!netif_running(ndev)) return 0; xgmac_pmt(ioaddr, 0); /* Enable the MAC and DMA */ xgmac_mac_enable(ioaddr); writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); netif_device_attach(ndev); napi_enable(&priv->napi); return 0; } #endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume); static const struct of_device_id xgmac_of_match[] = { { .compatible = "calxeda,hb-xgmac", }, {}, }; MODULE_DEVICE_TABLE(of, xgmac_of_match); static struct platform_driver xgmac_driver = { .driver = { .name = "calxedaxgmac", .of_match_table = xgmac_of_match, .pm = &xgmac_pm_ops, }, .probe = xgmac_probe, .remove = xgmac_remove, }; module_platform_driver(xgmac_driver); MODULE_AUTHOR("Calxeda, Inc."); MODULE_DESCRIPTION("Calxeda 10G XGMAC driver"); MODULE_LICENSE("GPL v2");
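The ethtool statistics handler in the driver above walks a single descriptor table in which each entry names either an offset into the driver's private counter struct or a hardware register offset, selected by an is_reg flag, so one copy loop serves both kinds of counters. Below is a minimal, self-contained userspace C sketch of that table-driven pattern; every name here (demo_*, fake_mmio, fake_readl) is hypothetical, and a plain array stands in for the ioremap()ed register window, so treat it as an illustration of the idea rather than the driver's actual code.

/*
 * Standalone sketch of the descriptor-table pattern used for ethtool
 * statistics above: each table entry records either an offset into a
 * software counter struct or a (mock) hardware register offset, and one
 * loop serves both.  All names are invented for this example.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_priv {
	uint32_t tx_jabber;
	uint32_t rx_buf_unav;
	uint32_t fatal_bus_error;
};

struct demo_stat {
	const char *name;
	int is_reg;	/* 1: read from "hardware", 0: read from the struct */
	size_t offset;	/* register offset, or offsetof() into demo_priv */
};

#define DEMO_SW_STAT(f)		{ #f, 0, offsetof(struct demo_priv, f) }
#define DEMO_HW_STAT(n, reg)	{ #n, 1, (reg) }

static const struct demo_stat demo_stats[] = {
	DEMO_SW_STAT(tx_jabber),
	DEMO_SW_STAT(rx_buf_unav),
	DEMO_SW_STAT(fatal_bus_error),
	DEMO_HW_STAT(rx_pause, 0x10),
	DEMO_HW_STAT(tx_pause, 0x14),
};

/* Pretend MMIO window, indexed by byte offset, standing in for ioremap(). */
static uint32_t fake_mmio[0x20 / 4] = { [0x10 / 4] = 7, [0x14 / 4] = 3 };

static uint32_t fake_readl(size_t offset)
{
	return fake_mmio[offset / 4];
}

static void demo_get_stats(const struct demo_priv *priv, uint64_t *data)
{
	const char *p = (const char *)priv;
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		if (demo_stats[i].is_reg)
			*data++ = fake_readl(demo_stats[i].offset);
		else
			*data++ = *(const uint32_t *)(p + demo_stats[i].offset);
	}
}

int main(void)
{
	struct demo_priv priv = { .tx_jabber = 1, .rx_buf_unav = 2 };
	uint64_t vals[5];
	size_t i;

	demo_get_stats(&priv, vals);
	for (i = 0; i < 5; i++)
		printf("%-16s %llu\n", demo_stats[i].name,
		       (unsigned long long)vals[i]);
	return 0;
}

The attraction of the flag-plus-offset layout is that adding a statistic only means adding one table entry; the copy loop and the string/count callbacks never change.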
linux-master
drivers/net/ethernet/calxeda/xgmac.c
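One more illustration from the Calxeda driver above before the next file: xgmac_set_wol() rejects any wake source outside the supported mask, and xgmac_pmt() later converts the stored wolopts into a register value by OR-ing a power-down bit together with one enable bit per wake source. The sketch below mirrors that control flow in plain C under stated assumptions; the DEMO_* constants are invented for the example and do not reflect the real XGMAC_PMT register layout.

/*
 * Minimal sketch of the Wake-on-LAN handling shown in xgmac_set_wol() and
 * xgmac_pmt(): validate the requested wake sources against a support mask,
 * then build the power-management register value from the stored options.
 * All constants and names here are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define DEMO_WAKE_MAGIC		0x1
#define DEMO_WAKE_UCAST		0x2

#define DEMO_PMT_POWERDOWN	(1u << 0)
#define DEMO_PMT_MAGIC_PKT_EN	(1u << 1)
#define DEMO_PMT_GLBL_UNICAST	(1u << 2)

/* Validate the requested wake sources; returns 0 or a negative errno. */
static int demo_set_wol(uint32_t requested, uint32_t *wolopts)
{
	const uint32_t supported = DEMO_WAKE_MAGIC | DEMO_WAKE_UCAST;

	if (requested & ~supported)
		return -EINVAL;

	*wolopts = requested;
	return 0;
}

/* Translate the stored wake options into a PMT-style register value. */
static uint32_t demo_pmt_value(uint32_t wolopts)
{
	uint32_t pmt = 0;

	if (wolopts & DEMO_WAKE_MAGIC)
		pmt |= DEMO_PMT_POWERDOWN | DEMO_PMT_MAGIC_PKT_EN;
	if (wolopts & DEMO_WAKE_UCAST)
		pmt |= DEMO_PMT_POWERDOWN | DEMO_PMT_GLBL_UNICAST;

	return pmt;
}

int main(void)
{
	uint32_t wolopts = 0;

	if (demo_set_wol(DEMO_WAKE_MAGIC | DEMO_WAKE_UCAST, &wolopts))
		return 1;

	printf("PMT register value: 0x%x\n", demo_pmt_value(wolopts));
	return 0;
}

Keeping validation (demo_set_wol) separate from the register encoding (demo_pmt_value) matches the driver's split between the ethtool entry point and the suspend path, which only consumes the already-validated wolopts.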
// SPDX-License-Identifier: GPL-2.0 /* Ethernet device driver for Cortina Systems Gemini SoC * Also known as the StorLink SL3512 and SL3516 (SL351x) or Lepus * Net Engine and Gigabit Ethernet MAC (GMAC) * This hardware contains a TCP Offload Engine (TOE) but currently the * driver does not make use of it. * * Authors: * Linus Walleij <[email protected]> * Tobias Waldvogel <[email protected]> (OpenWRT) * Michał Mirosław <[email protected]> * Paulius Zaleckas <[email protected]> * Giuseppe De Robertis <[email protected]> * Gary Chen & Ch Hsu Storlink Semiconductor */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/cache.h> #include <linux/interrupt.h> #include <linux/reset.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_platform.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/skbuff.h> #include <linux/phy.h> #include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/tcp.h> #include <linux/u64_stats_sync.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> #include "gemini.h" #define DRV_NAME "gmac-gemini" #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define HSIZE_8 0x00 #define HSIZE_16 0x01 #define HSIZE_32 0x02 #define HBURST_SINGLE 0x00 #define HBURST_INCR 0x01 #define HBURST_INCR4 0x02 #define HBURST_INCR8 0x03 #define HPROT_DATA_CACHE BIT(0) #define HPROT_PRIVILIGED BIT(1) #define HPROT_BUFFERABLE BIT(2) #define HPROT_CACHABLE BIT(3) #define DEFAULT_RX_COALESCE_NSECS 0 #define DEFAULT_GMAC_RXQ_ORDER 9 #define DEFAULT_GMAC_TXQ_ORDER 8 #define DEFAULT_RX_BUF_ORDER 11 #define TX_MAX_FRAGS 16 #define TX_QUEUE_NUM 1 /* max: 6 */ #define RX_MAX_ALLOC_ORDER 2 #define GMAC0_IRQ0_2 (GMAC0_TXDERR_INT_BIT | GMAC0_TXPERR_INT_BIT | \ GMAC0_RXDERR_INT_BIT | GMAC0_RXPERR_INT_BIT) #define GMAC0_IRQ0_TXQ0_INTS (GMAC0_SWTQ00_EOF_INT_BIT | \ GMAC0_SWTQ00_FIN_INT_BIT) #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT) #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) /** * struct gmac_queue_page - page buffer per-page info * @page: the page struct * @mapping: the dma address handle */ struct gmac_queue_page { struct page *page; dma_addr_t mapping; }; struct gmac_txq { struct gmac_txdesc *ring; struct sk_buff **skb; unsigned int cptr; unsigned int noirq_packets; }; struct gemini_ethernet; struct gemini_ethernet_port { u8 id; /* 0 or 1 */ struct gemini_ethernet *geth; struct net_device *netdev; struct device *dev; void __iomem *dma_base; void __iomem *gmac_base; struct clk *pclk; struct reset_control *reset; int irq; __le32 mac_addr[3]; void __iomem *rxq_rwptr; struct gmac_rxdesc *rxq_ring; unsigned int rxq_order; struct napi_struct napi; struct hrtimer rx_coalesce_timer; unsigned int rx_coalesce_nsecs; unsigned int freeq_refill; struct gmac_txq txq[TX_QUEUE_NUM]; unsigned int txq_order; unsigned int irq_every_tx_packets; dma_addr_t rxq_dma_base; dma_addr_t txq_dma_base; unsigned int msg_enable; spinlock_t config_lock; /* Locks config register */ struct u64_stats_sync tx_stats_syncp; struct u64_stats_sync rx_stats_syncp; struct u64_stats_sync ir_stats_syncp; struct 
rtnl_link_stats64 stats; u64 hw_stats[RX_STATS_NUM]; u64 rx_stats[RX_STATUS_NUM]; u64 rx_csum_stats[RX_CHKSUM_NUM]; u64 rx_napi_exits; u64 tx_frag_stats[TX_MAX_FRAGS]; u64 tx_frags_linearized; u64 tx_hw_csummed; }; struct gemini_ethernet { struct device *dev; void __iomem *base; struct gemini_ethernet_port *port0; struct gemini_ethernet_port *port1; bool initialized; spinlock_t irq_lock; /* Locks IRQ-related registers */ unsigned int freeq_order; unsigned int freeq_frag_order; struct gmac_rxdesc *freeq_ring; dma_addr_t freeq_dma_base; struct gmac_queue_page *freeq_pages; unsigned int num_freeq_pages; spinlock_t freeq_lock; /* Locks queue from reentrance */ }; #define GMAC_STATS_NUM ( \ RX_STATS_NUM + RX_STATUS_NUM + RX_CHKSUM_NUM + 1 + \ TX_MAX_FRAGS + 2) static const char gmac_stats_strings[GMAC_STATS_NUM][ETH_GSTRING_LEN] = { "GMAC_IN_DISCARDS", "GMAC_IN_ERRORS", "GMAC_IN_MCAST", "GMAC_IN_BCAST", "GMAC_IN_MAC1", "GMAC_IN_MAC2", "RX_STATUS_GOOD_FRAME", "RX_STATUS_TOO_LONG_GOOD_CRC", "RX_STATUS_RUNT_FRAME", "RX_STATUS_SFD_NOT_FOUND", "RX_STATUS_CRC_ERROR", "RX_STATUS_TOO_LONG_BAD_CRC", "RX_STATUS_ALIGNMENT_ERROR", "RX_STATUS_TOO_LONG_BAD_ALIGN", "RX_STATUS_RX_ERR", "RX_STATUS_DA_FILTERED", "RX_STATUS_BUFFER_FULL", "RX_STATUS_11", "RX_STATUS_12", "RX_STATUS_13", "RX_STATUS_14", "RX_STATUS_15", "RX_CHKSUM_IP_UDP_TCP_OK", "RX_CHKSUM_IP_OK_ONLY", "RX_CHKSUM_NONE", "RX_CHKSUM_3", "RX_CHKSUM_IP_ERR_UNKNOWN", "RX_CHKSUM_IP_ERR", "RX_CHKSUM_TCP_UDP_ERR", "RX_CHKSUM_7", "RX_NAPI_EXITS", "TX_FRAGS[1]", "TX_FRAGS[2]", "TX_FRAGS[3]", "TX_FRAGS[4]", "TX_FRAGS[5]", "TX_FRAGS[6]", "TX_FRAGS[7]", "TX_FRAGS[8]", "TX_FRAGS[9]", "TX_FRAGS[10]", "TX_FRAGS[11]", "TX_FRAGS[12]", "TX_FRAGS[13]", "TX_FRAGS[14]", "TX_FRAGS[15]", "TX_FRAGS[16+]", "TX_FRAGS_LINEARIZED", "TX_HW_CSUMMED", }; static void gmac_dump_dma_state(struct net_device *netdev); static void gmac_update_config0_reg(struct net_device *netdev, u32 val, u32 vmask) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned long flags; u32 reg; spin_lock_irqsave(&port->config_lock, flags); reg = readl(port->gmac_base + GMAC_CONFIG0); reg = (reg & ~vmask) | val; writel(reg, port->gmac_base + GMAC_CONFIG0); spin_unlock_irqrestore(&port->config_lock, flags); } static void gmac_enable_tx_rx(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned long flags; u32 reg; spin_lock_irqsave(&port->config_lock, flags); reg = readl(port->gmac_base + GMAC_CONFIG0); reg &= ~CONFIG0_TX_RX_DISABLE; writel(reg, port->gmac_base + GMAC_CONFIG0); spin_unlock_irqrestore(&port->config_lock, flags); } static void gmac_disable_tx_rx(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned long flags; u32 val; spin_lock_irqsave(&port->config_lock, flags); val = readl(port->gmac_base + GMAC_CONFIG0); val |= CONFIG0_TX_RX_DISABLE; writel(val, port->gmac_base + GMAC_CONFIG0); spin_unlock_irqrestore(&port->config_lock, flags); mdelay(10); /* let GMAC consume packet */ } static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned long flags; u32 val; spin_lock_irqsave(&port->config_lock, flags); val = readl(port->gmac_base + GMAC_CONFIG0); val &= ~CONFIG0_FLOW_CTL; if (tx) val |= CONFIG0_FLOW_TX; if (rx) val |= CONFIG0_FLOW_RX; writel(val, port->gmac_base + GMAC_CONFIG0); spin_unlock_irqrestore(&port->config_lock, flags); } static void gmac_speed_set(struct net_device *netdev) { struct gemini_ethernet_port *port 
= netdev_priv(netdev); struct phy_device *phydev = netdev->phydev; union gmac_status status, old_status; int pause_tx = 0; int pause_rx = 0; status.bits32 = readl(port->gmac_base + GMAC_STATUS); old_status.bits32 = status.bits32; status.bits.link = phydev->link; status.bits.duplex = phydev->duplex; switch (phydev->speed) { case 1000: status.bits.speed = GMAC_SPEED_1000; if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_1000; netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n", phydev_name(phydev)); break; case 100: status.bits.speed = GMAC_SPEED_100; if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n", phydev_name(phydev)); break; case 10: status.bits.speed = GMAC_SPEED_10; if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n", phydev_name(phydev)); break; default: netdev_warn(netdev, "Unsupported PHY speed (%d) on %s\n", phydev->speed, phydev_name(phydev)); } if (phydev->duplex == DUPLEX_FULL) { u16 lcladv = phy_read(phydev, MII_ADVERTISE); u16 rmtadv = phy_read(phydev, MII_LPA); u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); if (cap & FLOW_CTRL_RX) pause_rx = 1; if (cap & FLOW_CTRL_TX) pause_tx = 1; } gmac_set_flow_control(netdev, pause_tx, pause_rx); if (old_status.bits32 == status.bits32) return; if (netif_msg_link(port)) { phy_print_status(phydev); netdev_info(netdev, "link flow control: %s\n", phydev->pause ? (phydev->asym_pause ? "tx" : "both") : (phydev->asym_pause ? "rx" : "none") ); } gmac_disable_tx_rx(netdev); writel(status.bits32, port->gmac_base + GMAC_STATUS); gmac_enable_tx_rx(netdev); } static int gmac_setup_phy(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); union gmac_status status = { .bits32 = 0 }; struct device *dev = port->dev; struct phy_device *phy; phy = of_phy_get_and_connect(netdev, dev->of_node, gmac_speed_set); if (!phy) return -ENODEV; netdev->phydev = phy; phy_set_max_speed(phy, SPEED_1000); phy_support_asym_pause(phy); /* set PHY interface type */ switch (phy->interface) { case PHY_INTERFACE_MODE_MII: netdev_dbg(netdev, "MII: set GMAC0 to GMII mode, GMAC1 disabled\n"); status.bits.mii_rmii = GMAC_PHY_MII; break; case PHY_INTERFACE_MODE_GMII: netdev_dbg(netdev, "GMII: set GMAC0 to GMII mode, GMAC1 disabled\n"); status.bits.mii_rmii = GMAC_PHY_GMII; break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_TXID: case PHY_INTERFACE_MODE_RGMII_RXID: netdev_dbg(netdev, "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n"); status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; break; default: netdev_err(netdev, "Unsupported MII interface\n"); phy_disconnect(phy); netdev->phydev = NULL; return -EINVAL; } writel(status.bits32, port->gmac_base + GMAC_STATUS); if (netif_msg_link(port)) phy_attached_info(phy); return 0; } /* The maximum frame length is not logically enumerated in the * hardware, so we do a table lookup to find the applicable max * frame length. 
*/ struct gmac_max_framelen { unsigned int max_l3_len; u8 val; }; static const struct gmac_max_framelen gmac_maxlens[] = { { .max_l3_len = 1518, .val = CONFIG0_MAXLEN_1518, }, { .max_l3_len = 1522, .val = CONFIG0_MAXLEN_1522, }, { .max_l3_len = 1536, .val = CONFIG0_MAXLEN_1536, }, { .max_l3_len = 1542, .val = CONFIG0_MAXLEN_1542, }, { .max_l3_len = 9212, .val = CONFIG0_MAXLEN_9k, }, { .max_l3_len = 10236, .val = CONFIG0_MAXLEN_10k, }, }; static int gmac_pick_rx_max_len(unsigned int max_l3_len) { const struct gmac_max_framelen *maxlen; int maxtot; int i; maxtot = max_l3_len + ETH_HLEN + VLAN_HLEN; for (i = 0; i < ARRAY_SIZE(gmac_maxlens); i++) { maxlen = &gmac_maxlens[i]; if (maxtot <= maxlen->max_l3_len) return maxlen->val; } return -1; } static int gmac_init(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); union gmac_config0 config0 = { .bits = { .dis_tx = 1, .dis_rx = 1, .ipv4_rx_chksum = 1, .ipv6_rx_chksum = 1, .rx_err_detect = 1, .rgmm_edge = 1, .port0_chk_hwq = 1, .port1_chk_hwq = 1, .port0_chk_toeq = 1, .port1_chk_toeq = 1, .port0_chk_classq = 1, .port1_chk_classq = 1, } }; union gmac_ahb_weight ahb_weight = { .bits = { .rx_weight = 1, .tx_weight = 1, .hash_weight = 1, .pre_req = 0x1f, .tq_dv_threshold = 0, } }; union gmac_tx_wcr0 hw_weigh = { .bits = { .hw_tq3 = 1, .hw_tq2 = 1, .hw_tq1 = 1, .hw_tq0 = 1, } }; union gmac_tx_wcr1 sw_weigh = { .bits = { .sw_tq5 = 1, .sw_tq4 = 1, .sw_tq3 = 1, .sw_tq2 = 1, .sw_tq1 = 1, .sw_tq0 = 1, } }; union gmac_config1 config1 = { .bits = { .set_threshold = 16, .rel_threshold = 24, } }; union gmac_config2 config2 = { .bits = { .set_threshold = 16, .rel_threshold = 32, } }; union gmac_config3 config3 = { .bits = { .set_threshold = 0, .rel_threshold = 0, } }; union gmac_config0 tmp; config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu); tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0); config0.bits.reserved = tmp.bits.reserved; writel(config0.bits32, port->gmac_base + GMAC_CONFIG0); writel(config1.bits32, port->gmac_base + GMAC_CONFIG1); writel(config2.bits32, port->gmac_base + GMAC_CONFIG2); writel(config3.bits32, port->gmac_base + GMAC_CONFIG3); readl(port->dma_base + GMAC_AHB_WEIGHT_REG); writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG); writel(hw_weigh.bits32, port->dma_base + GMAC_TX_WEIGHTING_CTRL_0_REG); writel(sw_weigh.bits32, port->dma_base + GMAC_TX_WEIGHTING_CTRL_1_REG); port->rxq_order = DEFAULT_GMAC_RXQ_ORDER; port->txq_order = DEFAULT_GMAC_TXQ_ORDER; port->rx_coalesce_nsecs = DEFAULT_RX_COALESCE_NSECS; /* Mark every quarter of the queue a packet for interrupt * in order to be able to wake up the queue if it was stopped */ port->irq_every_tx_packets = 1 << (port->txq_order - 2); return 0; } static int gmac_setup_txqs(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned int n_txq = netdev->num_tx_queues; struct gemini_ethernet *geth = port->geth; size_t entries = 1 << port->txq_order; struct gmac_txq *txq = port->txq; struct gmac_txdesc *desc_ring; size_t len = n_txq * entries; struct sk_buff **skb_tab; void __iomem *rwptr_reg; unsigned int r; int i; rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG; skb_tab = kcalloc(len, sizeof(*skb_tab), GFP_KERNEL); if (!skb_tab) return -ENOMEM; desc_ring = dma_alloc_coherent(geth->dev, len * sizeof(*desc_ring), &port->txq_dma_base, GFP_KERNEL); if (!desc_ring) { kfree(skb_tab); return -ENOMEM; } if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { dev_warn(geth->dev, "TX queue base is not aligned\n"); 
dma_free_coherent(geth->dev, len * sizeof(*desc_ring), desc_ring, port->txq_dma_base); kfree(skb_tab); return -ENOMEM; } writel(port->txq_dma_base | port->txq_order, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG); for (i = 0; i < n_txq; i++) { txq->ring = desc_ring; txq->skb = skb_tab; txq->noirq_packets = 0; r = readw(rwptr_reg); rwptr_reg += 2; writew(r, rwptr_reg); rwptr_reg += 2; txq->cptr = r; txq++; desc_ring += entries; skb_tab += entries; } return 0; } static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq, unsigned int r) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned int m = (1 << port->txq_order) - 1; struct gemini_ethernet *geth = port->geth; unsigned int c = txq->cptr; union gmac_txdesc_0 word0; union gmac_txdesc_1 word1; unsigned int hwchksum = 0; unsigned long bytes = 0; struct gmac_txdesc *txd; unsigned short nfrags; unsigned int errs = 0; unsigned int pkts = 0; unsigned int word3; dma_addr_t mapping; if (c == r) return; while (c != r) { txd = txq->ring + c; word0 = txd->word0; word1 = txd->word1; mapping = txd->word2.buf_adr; word3 = txd->word3.bits32; dma_unmap_single(geth->dev, mapping, word0.bits.buffer_size, DMA_TO_DEVICE); if (word3 & EOF_BIT) dev_kfree_skb(txq->skb[c]); c++; c &= m; if (!(word3 & SOF_BIT)) continue; if (!word0.bits.status_tx_ok) { errs++; continue; } pkts++; bytes += txd->word1.bits.byte_count; if (word1.bits32 & TSS_CHECKUM_ENABLE) hwchksum++; nfrags = word0.bits.desc_count - 1; if (nfrags) { if (nfrags >= TX_MAX_FRAGS) nfrags = TX_MAX_FRAGS - 1; u64_stats_update_begin(&port->tx_stats_syncp); port->tx_frag_stats[nfrags]++; u64_stats_update_end(&port->tx_stats_syncp); } } u64_stats_update_begin(&port->ir_stats_syncp); port->stats.tx_errors += errs; port->stats.tx_packets += pkts; port->stats.tx_bytes += bytes; port->tx_hw_csummed += hwchksum; u64_stats_update_end(&port->ir_stats_syncp); txq->cptr = c; } static void gmac_cleanup_txqs(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned int n_txq = netdev->num_tx_queues; struct gemini_ethernet *geth = port->geth; void __iomem *rwptr_reg; unsigned int r, i; rwptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG; for (i = 0; i < n_txq; i++) { r = readw(rwptr_reg); rwptr_reg += 2; writew(r, rwptr_reg); rwptr_reg += 2; gmac_clean_txq(netdev, port->txq + i, r); } writel(0, port->dma_base + GMAC_SW_TX_QUEUE_BASE_REG); kfree(port->txq->skb); dma_free_coherent(geth->dev, n_txq * sizeof(*port->txq->ring) << port->txq_order, port->txq->ring, port->txq_dma_base); } static int gmac_setup_rxq(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct gemini_ethernet *geth = port->geth; struct nontoe_qhdr __iomem *qhdr; qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id); port->rxq_rwptr = &qhdr->word1; /* Remap a slew of memory to use for the RX queue */ port->rxq_ring = dma_alloc_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order, &port->rxq_dma_base, GFP_KERNEL); if (!port->rxq_ring) return -ENOMEM; if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) { dev_warn(geth->dev, "RX queue base is not aligned\n"); return -ENOMEM; } writel(port->rxq_dma_base | port->rxq_order, &qhdr->word0); writel(0, port->rxq_rwptr); return 0; } static struct gmac_queue_page * gmac_get_queue_page(struct gemini_ethernet *geth, struct gemini_ethernet_port *port, dma_addr_t addr) { struct gmac_queue_page *gpage; dma_addr_t mapping; int i; /* Only look for even pages */ mapping = addr & PAGE_MASK; if 
(!geth->freeq_pages) { dev_err(geth->dev, "try to get page with no page list\n"); return NULL; } /* Look up a ring buffer page from virtual mapping */ for (i = 0; i < geth->num_freeq_pages; i++) { gpage = &geth->freeq_pages[i]; if (gpage->mapping == mapping) return gpage; } return NULL; } static void gmac_cleanup_rxq(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct gemini_ethernet *geth = port->geth; struct gmac_rxdesc *rxd = port->rxq_ring; static struct gmac_queue_page *gpage; struct nontoe_qhdr __iomem *qhdr; void __iomem *dma_reg; void __iomem *ptr_reg; dma_addr_t mapping; union dma_rwptr rw; unsigned int r, w; qhdr = geth->base + TOE_DEFAULT_Q_HDR_BASE(netdev->dev_id); dma_reg = &qhdr->word0; ptr_reg = &qhdr->word1; rw.bits32 = readl(ptr_reg); r = rw.bits.rptr; w = rw.bits.wptr; writew(r, ptr_reg + 2); writel(0, dma_reg); /* Loop from read pointer to write pointer of the RX queue * and free up all pages by the queue. */ while (r != w) { mapping = rxd[r].word2.buf_adr; r++; r &= ((1 << port->rxq_order) - 1); if (!mapping) continue; /* Freeq pointers are one page off */ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE); if (!gpage) { dev_err(geth->dev, "could not find page\n"); continue; } /* Release the RX queue reference to the page */ put_page(gpage->page); } dma_free_coherent(geth->dev, sizeof(*port->rxq_ring) << port->rxq_order, port->rxq_ring, port->rxq_dma_base); } static struct page *geth_freeq_alloc_map_page(struct gemini_ethernet *geth, int pn) { struct gmac_rxdesc *freeq_entry; struct gmac_queue_page *gpage; unsigned int fpp_order; unsigned int frag_len; dma_addr_t mapping; struct page *page; int i; /* First allocate and DMA map a single page */ page = alloc_page(GFP_ATOMIC); if (!page) return NULL; mapping = dma_map_single(geth->dev, page_address(page), PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(geth->dev, mapping)) { put_page(page); return NULL; } /* The assign the page mapping (physical address) to the buffer address * in the hardware queue. PAGE_SHIFT on ARM is 12 (1 page is 4096 bytes, * 4k), and the default RX frag order is 11 (fragments are up 20 2048 * bytes, 2k) so fpp_order (fragments per page order) is default 1. Thus * each page normally needs two entries in the queue. */ frag_len = 1 << geth->freeq_frag_order; /* Usually 2048 */ fpp_order = PAGE_SHIFT - geth->freeq_frag_order; freeq_entry = geth->freeq_ring + (pn << fpp_order); dev_dbg(geth->dev, "allocate page %d fragment length %d fragments per page %d, freeq entry %p\n", pn, frag_len, (1 << fpp_order), freeq_entry); for (i = (1 << fpp_order); i > 0; i--) { freeq_entry->word2.buf_adr = mapping; freeq_entry++; mapping += frag_len; } /* If the freeq entry already has a page mapped, then unmap it. */ gpage = &geth->freeq_pages[pn]; if (gpage->page) { mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); /* This should be the last reference to the page so it gets * released */ put_page(gpage->page); } /* Then put our new mapping into the page table */ dev_dbg(geth->dev, "page %d, DMA addr: %08x, page %p\n", pn, (unsigned int)mapping, page); gpage->mapping = mapping; gpage->page = page; return page; } /** * geth_fill_freeq() - Fill the freeq with empty fragments to use * @geth: the ethernet adapter * @refill: whether to reset the queue by filling in all freeq entries or * just refill it, usually the interrupt to refill the queue happens when * the queue is half empty. 
*/ static unsigned int geth_fill_freeq(struct gemini_ethernet *geth, bool refill) { unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order; unsigned int count = 0; unsigned int pn, epn; unsigned long flags; union dma_rwptr rw; unsigned int m_pn; /* Mask for page */ m_pn = (1 << (geth->freeq_order - fpp_order)) - 1; spin_lock_irqsave(&geth->freeq_lock, flags); rw.bits32 = readl(geth->base + GLOBAL_SWFQ_RWPTR_REG); pn = (refill ? rw.bits.wptr : rw.bits.rptr) >> fpp_order; epn = (rw.bits.rptr >> fpp_order) - 1; epn &= m_pn; /* Loop over the freeq ring buffer entries */ while (pn != epn) { struct gmac_queue_page *gpage; struct page *page; gpage = &geth->freeq_pages[pn]; page = gpage->page; dev_dbg(geth->dev, "fill entry %d page ref count %d add %d refs\n", pn, page_ref_count(page), 1 << fpp_order); if (page_ref_count(page) > 1) { unsigned int fl = (pn - epn) & m_pn; if (fl > 64 >> fpp_order) break; page = geth_freeq_alloc_map_page(geth, pn); if (!page) break; } /* Add one reference per fragment in the page */ page_ref_add(page, 1 << fpp_order); count += 1 << fpp_order; pn++; pn &= m_pn; } writew(pn << fpp_order, geth->base + GLOBAL_SWFQ_RWPTR_REG + 2); spin_unlock_irqrestore(&geth->freeq_lock, flags); return count; } static int geth_setup_freeq(struct gemini_ethernet *geth) { unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order; unsigned int frag_len = 1 << geth->freeq_frag_order; unsigned int len = 1 << geth->freeq_order; unsigned int pages = len >> fpp_order; union queue_threshold qt; union dma_skb_size skbsz; unsigned int filled; unsigned int pn; geth->freeq_ring = dma_alloc_coherent(geth->dev, sizeof(*geth->freeq_ring) << geth->freeq_order, &geth->freeq_dma_base, GFP_KERNEL); if (!geth->freeq_ring) return -ENOMEM; if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) { dev_warn(geth->dev, "queue ring base is not aligned\n"); goto err_freeq; } /* Allocate a mapping to page look-up index */ geth->freeq_pages = kcalloc(pages, sizeof(*geth->freeq_pages), GFP_KERNEL); if (!geth->freeq_pages) goto err_freeq; geth->num_freeq_pages = pages; dev_info(geth->dev, "allocate %d pages for queue\n", pages); for (pn = 0; pn < pages; pn++) if (!geth_freeq_alloc_map_page(geth, pn)) goto err_freeq_alloc; filled = geth_fill_freeq(geth, false); if (!filled) goto err_freeq_alloc; qt.bits32 = readl(geth->base + GLOBAL_QUEUE_THRESHOLD_REG); qt.bits.swfq_empty = 32; writel(qt.bits32, geth->base + GLOBAL_QUEUE_THRESHOLD_REG); skbsz.bits.sw_skb_size = 1 << geth->freeq_frag_order; writel(skbsz.bits32, geth->base + GLOBAL_DMA_SKB_SIZE_REG); writel(geth->freeq_dma_base | geth->freeq_order, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG); return 0; err_freeq_alloc: while (pn > 0) { struct gmac_queue_page *gpage; dma_addr_t mapping; --pn; mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); gpage = &geth->freeq_pages[pn]; put_page(gpage->page); } kfree(geth->freeq_pages); err_freeq: dma_free_coherent(geth->dev, sizeof(*geth->freeq_ring) << geth->freeq_order, geth->freeq_ring, geth->freeq_dma_base); geth->freeq_ring = NULL; return -ENOMEM; } /** * geth_cleanup_freeq() - cleanup the DMA mappings and free the queue * @geth: the Gemini global ethernet state */ static void geth_cleanup_freeq(struct gemini_ethernet *geth) { unsigned int fpp_order = PAGE_SHIFT - geth->freeq_frag_order; unsigned int frag_len = 1 << geth->freeq_frag_order; unsigned int len = 1 << geth->freeq_order; unsigned int pages = len >> fpp_order; unsigned int pn; 
writew(readw(geth->base + GLOBAL_SWFQ_RWPTR_REG), geth->base + GLOBAL_SWFQ_RWPTR_REG + 2); writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG); for (pn = 0; pn < pages; pn++) { struct gmac_queue_page *gpage; dma_addr_t mapping; mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); gpage = &geth->freeq_pages[pn]; while (page_ref_count(gpage->page) > 0) put_page(gpage->page); } kfree(geth->freeq_pages); dma_free_coherent(geth->dev, sizeof(*geth->freeq_ring) << geth->freeq_order, geth->freeq_ring, geth->freeq_dma_base); } /** * geth_resize_freeq() - resize the software queue depth * @port: the port requesting the change * * This gets called at least once during probe() so the device queue gets * "resized" from the hardware defaults. Since both ports/net devices share * the same hardware queue, some synchronization between the ports is * needed. */ static int geth_resize_freeq(struct gemini_ethernet_port *port) { struct gemini_ethernet *geth = port->geth; struct net_device *netdev = port->netdev; struct gemini_ethernet_port *other_port; struct net_device *other_netdev; unsigned int new_size = 0; unsigned int new_order; unsigned long flags; u32 en; int ret; if (netdev->dev_id == 0) other_netdev = geth->port1->netdev; else other_netdev = geth->port0->netdev; if (other_netdev && netif_running(other_netdev)) return -EBUSY; new_size = 1 << (port->rxq_order + 1); netdev_dbg(netdev, "port %d size: %d order %d\n", netdev->dev_id, new_size, port->rxq_order); if (other_netdev) { other_port = netdev_priv(other_netdev); new_size += 1 << (other_port->rxq_order + 1); netdev_dbg(other_netdev, "port %d size: %d order %d\n", other_netdev->dev_id, (1 << (other_port->rxq_order + 1)), other_port->rxq_order); } new_order = min(15, ilog2(new_size - 1) + 1); dev_dbg(geth->dev, "set shared queue to size %d order %d\n", new_size, new_order); if (geth->freeq_order == new_order) return 0; spin_lock_irqsave(&geth->irq_lock, flags); /* Disable the software queue IRQs */ en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); en &= ~SWFQ_EMPTY_INT_BIT; writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); spin_unlock_irqrestore(&geth->irq_lock, flags); /* Drop the old queue */ if (geth->freeq_ring) geth_cleanup_freeq(geth); /* Allocate a new queue with the desired order */ geth->freeq_order = new_order; ret = geth_setup_freeq(geth); /* Restart the interrupts - NOTE if this is the first resize * after probe(), this is where the interrupts get turned on * in the first place. */ spin_lock_irqsave(&geth->irq_lock, flags); en |= SWFQ_EMPTY_INT_BIT; writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); spin_unlock_irqrestore(&geth->irq_lock, flags); return ret; } static void gmac_tx_irq_enable(struct net_device *netdev, unsigned int txq, int en) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct gemini_ethernet *geth = port->geth; u32 val, mask; netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id); mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq); if (en) writel(mask, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG); val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); val = en ? 
val | mask : val & ~mask; writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); } static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num) { struct netdev_queue *ntxq = netdev_get_tx_queue(netdev, txq_num); gmac_tx_irq_enable(netdev, txq_num, 0); netif_tx_wake_queue(ntxq); } static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, struct gmac_txq *txq, unsigned short *desc) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct skb_shared_info *skb_si = skb_shinfo(skb); unsigned short m = (1 << port->txq_order) - 1; short frag, last_frag = skb_si->nr_frags - 1; struct gemini_ethernet *geth = port->geth; unsigned int word1, word3, buflen; unsigned short w = *desc; struct gmac_txdesc *txd; skb_frag_t *skb_frag; dma_addr_t mapping; unsigned short mtu; void *buffer; mtu = ETH_HLEN; mtu += netdev->mtu; if (skb->protocol == htons(ETH_P_8021Q)) mtu += VLAN_HLEN; word1 = skb->len; word3 = SOF_BIT; if (word1 > mtu) { word1 |= TSS_MTU_ENABLE_BIT; word3 |= mtu; } if (skb->ip_summed != CHECKSUM_NONE) { int tcp = 0; if (skb->protocol == htons(ETH_P_IP)) { word1 |= TSS_IP_CHKSUM_BIT; tcp = ip_hdr(skb)->protocol == IPPROTO_TCP; } else { /* IPv6 */ word1 |= TSS_IPV6_ENABLE_BIT; tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP; } word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT; } frag = -1; while (frag <= last_frag) { if (frag == -1) { buffer = skb->data; buflen = skb_headlen(skb); } else { skb_frag = skb_si->frags + frag; buffer = skb_frag_address(skb_frag); buflen = skb_frag_size(skb_frag); } if (frag == last_frag) { word3 |= EOF_BIT; txq->skb[w] = skb; } mapping = dma_map_single(geth->dev, buffer, buflen, DMA_TO_DEVICE); if (dma_mapping_error(geth->dev, mapping)) goto map_error; txd = txq->ring + w; txd->word0.bits32 = buflen; txd->word1.bits32 = word1; txd->word2.buf_adr = mapping; txd->word3.bits32 = word3; word3 &= MTU_SIZE_BIT_MASK; w++; w &= m; frag++; } *desc = w; return 0; map_error: while (w != *desc) { w--; w &= m; dma_unmap_page(geth->dev, txq->ring[w].word2.buf_adr, txq->ring[w].word0.bits.buffer_size, DMA_TO_DEVICE); } return -ENOMEM; } static netdev_tx_t gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned short m = (1 << port->txq_order) - 1; struct netdev_queue *ntxq; unsigned short r, w, d; void __iomem *ptr_reg; struct gmac_txq *txq; int txq_num, nfrags; union dma_rwptr rw; if (skb->len >= 0x10000) goto out_drop_free; txq_num = skb_get_queue_mapping(skb); ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE_PTR_REG(txq_num); txq = &port->txq[txq_num]; ntxq = netdev_get_tx_queue(netdev, txq_num); nfrags = skb_shinfo(skb)->nr_frags; rw.bits32 = readl(ptr_reg); r = rw.bits.rptr; w = rw.bits.wptr; d = txq->cptr - w - 1; d &= m; if (d < nfrags + 2) { gmac_clean_txq(netdev, txq, r); d = txq->cptr - w - 1; d &= m; if (d < nfrags + 2) { netif_tx_stop_queue(ntxq); d = txq->cptr + nfrags + 16; d &= m; txq->ring[d].word3.bits.eofie = 1; gmac_tx_irq_enable(netdev, txq_num, 1); u64_stats_update_begin(&port->tx_stats_syncp); netdev->stats.tx_fifo_errors++; u64_stats_update_end(&port->tx_stats_syncp); return NETDEV_TX_BUSY; } } if (gmac_map_tx_bufs(netdev, skb, txq, &w)) { if (skb_linearize(skb)) goto out_drop; u64_stats_update_begin(&port->tx_stats_syncp); port->tx_frags_linearized++; u64_stats_update_end(&port->tx_stats_syncp); if (gmac_map_tx_bufs(netdev, skb, txq, &w)) goto out_drop_free; } writew(w, ptr_reg + 2); gmac_clean_txq(netdev, txq, r); return NETDEV_TX_OK; 
out_drop_free: dev_kfree_skb(skb); out_drop: u64_stats_update_begin(&port->tx_stats_syncp); port->stats.tx_dropped++; u64_stats_update_end(&port->tx_stats_syncp); return NETDEV_TX_OK; } static void gmac_tx_timeout(struct net_device *netdev, unsigned int txqueue) { netdev_err(netdev, "Tx timeout\n"); gmac_dump_dma_state(netdev); } static void gmac_enable_irq(struct net_device *netdev, int enable) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct gemini_ethernet *geth = port->geth; unsigned long flags; u32 val, mask; netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id, enable ? "enable" : "disable"); spin_lock_irqsave(&geth->irq_lock, flags); mask = GMAC0_IRQ0_2 << (netdev->dev_id * 2); val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); val = enable ? (val | mask) : (val & ~mask); writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); mask = DEFAULT_Q0_INT_BIT << netdev->dev_id; val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); val = enable ? (val | mask) : (val & ~mask); writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); mask = GMAC0_IRQ4_8 << (netdev->dev_id * 8); val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); val = enable ? (val | mask) : (val & ~mask); writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); spin_unlock_irqrestore(&geth->irq_lock, flags); } static void gmac_enable_rx_irq(struct net_device *netdev, int enable) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct gemini_ethernet *geth = port->geth; unsigned long flags; u32 val, mask; netdev_dbg(netdev, "%s device %d %s\n", __func__, netdev->dev_id, enable ? "enable" : "disable"); spin_lock_irqsave(&geth->irq_lock, flags); mask = DEFAULT_Q0_INT_BIT << netdev->dev_id; val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); val = enable ? 
(val | mask) : (val & ~mask); writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); spin_unlock_irqrestore(&geth->irq_lock, flags); } static struct sk_buff *gmac_skb_if_good_frame(struct gemini_ethernet_port *port, union gmac_rxdesc_0 word0, unsigned int frame_len) { unsigned int rx_csum = word0.bits.chksum_status; unsigned int rx_status = word0.bits.status; struct sk_buff *skb = NULL; port->rx_stats[rx_status]++; port->rx_csum_stats[rx_csum]++; if (word0.bits.derr || word0.bits.perr || rx_status || frame_len < ETH_ZLEN || rx_csum >= RX_CHKSUM_IP_ERR_UNKNOWN) { port->stats.rx_errors++; if (frame_len < ETH_ZLEN || RX_ERROR_LENGTH(rx_status)) port->stats.rx_length_errors++; if (RX_ERROR_OVER(rx_status)) port->stats.rx_over_errors++; if (RX_ERROR_CRC(rx_status)) port->stats.rx_crc_errors++; if (RX_ERROR_FRAME(rx_status)) port->stats.rx_frame_errors++; return NULL; } skb = napi_get_frags(&port->napi); if (!skb) goto update_exit; if (rx_csum == RX_CHKSUM_IP_UDP_TCP_OK) skb->ip_summed = CHECKSUM_UNNECESSARY; update_exit: port->stats.rx_bytes += frame_len; port->stats.rx_packets++; return skb; } static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned short m = (1 << port->rxq_order) - 1; struct gemini_ethernet *geth = port->geth; void __iomem *ptr_reg = port->rxq_rwptr; unsigned int frame_len, frag_len; struct gmac_rxdesc *rx = NULL; struct gmac_queue_page *gpage; static struct sk_buff *skb; union gmac_rxdesc_0 word0; union gmac_rxdesc_1 word1; union gmac_rxdesc_3 word3; struct page *page = NULL; unsigned int page_offs; unsigned short r, w; union dma_rwptr rw; dma_addr_t mapping; int frag_nr = 0; rw.bits32 = readl(ptr_reg); /* Reset interrupt as all packages until here are taken into account */ writel(DEFAULT_Q0_INT_BIT << netdev->dev_id, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG); r = rw.bits.rptr; w = rw.bits.wptr; while (budget && w != r) { rx = port->rxq_ring + r; word0 = rx->word0; word1 = rx->word1; mapping = rx->word2.buf_adr; word3 = rx->word3; r++; r &= m; frag_len = word0.bits.buffer_size; frame_len = word1.bits.byte_count; page_offs = mapping & ~PAGE_MASK; if (!mapping) { netdev_err(netdev, "rxq[%u]: HW BUG: zero DMA desc\n", r); goto err_drop; } /* Freeq pointers are one page off */ gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE); if (!gpage) { dev_err(geth->dev, "could not find mapping\n"); continue; } page = gpage->page; if (word3.bits32 & SOF_BIT) { if (skb) { napi_free_frags(&port->napi); port->stats.rx_dropped++; } skb = gmac_skb_if_good_frame(port, word0, frame_len); if (!skb) goto err_drop; page_offs += NET_IP_ALIGN; frag_len -= NET_IP_ALIGN; frag_nr = 0; } else if (!skb) { put_page(page); continue; } if (word3.bits32 & EOF_BIT) frag_len = frame_len - skb->len; /* append page frag to skb */ if (frag_nr == MAX_SKB_FRAGS) goto err_drop; if (frag_len == 0) netdev_err(netdev, "Received fragment with len = 0\n"); skb_fill_page_desc(skb, frag_nr, page, page_offs, frag_len); skb->len += frag_len; skb->data_len += frag_len; skb->truesize += frag_len; frag_nr++; if (word3.bits32 & EOF_BIT) { napi_gro_frags(&port->napi); skb = NULL; --budget; } continue; err_drop: if (skb) { napi_free_frags(&port->napi); skb = NULL; } if (mapping) put_page(page); port->stats.rx_dropped++; } writew(r, ptr_reg); return budget; } static int gmac_napi_poll(struct napi_struct *napi, int budget) { struct gemini_ethernet_port *port = netdev_priv(napi->dev); struct gemini_ethernet *geth = port->geth; unsigned 
int freeq_threshold; unsigned int received; freeq_threshold = 1 << (geth->freeq_order - 1); u64_stats_update_begin(&port->rx_stats_syncp); received = gmac_rx(napi->dev, budget); if (received < budget) { napi_gro_flush(napi, false); napi_complete_done(napi, received); gmac_enable_rx_irq(napi->dev, 1); ++port->rx_napi_exits; } port->freeq_refill += (budget - received); if (port->freeq_refill > freeq_threshold) { port->freeq_refill -= freeq_threshold; geth_fill_freeq(geth, true); } u64_stats_update_end(&port->rx_stats_syncp); return received; } static void gmac_dump_dma_state(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct gemini_ethernet *geth = port->geth; void __iomem *ptr_reg; u32 reg[5]; /* Interrupt status */ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_0_REG); reg[1] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_1_REG); reg[2] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_2_REG); reg[3] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_3_REG); reg[4] = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); netdev_err(netdev, "IRQ status: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", reg[0], reg[1], reg[2], reg[3], reg[4]); /* Interrupt enable */ reg[0] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); reg[1] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); reg[2] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG); reg[3] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG); reg[4] = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); netdev_err(netdev, "IRQ enable: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", reg[0], reg[1], reg[2], reg[3], reg[4]); /* RX DMA status */ reg[0] = readl(port->dma_base + GMAC_DMA_RX_FIRST_DESC_REG); reg[1] = readl(port->dma_base + GMAC_DMA_RX_CURR_DESC_REG); reg[2] = GET_RPTR(port->rxq_rwptr); reg[3] = GET_WPTR(port->rxq_rwptr); netdev_err(netdev, "RX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n", reg[0], reg[1], reg[2], reg[3]); reg[0] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD0_REG); reg[1] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD1_REG); reg[2] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD2_REG); reg[3] = readl(port->dma_base + GMAC_DMA_RX_DESC_WORD3_REG); netdev_err(netdev, "RX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n", reg[0], reg[1], reg[2], reg[3]); /* TX DMA status */ ptr_reg = port->dma_base + GMAC_SW_TX_QUEUE0_PTR_REG; reg[0] = readl(port->dma_base + GMAC_DMA_TX_FIRST_DESC_REG); reg[1] = readl(port->dma_base + GMAC_DMA_TX_CURR_DESC_REG); reg[2] = GET_RPTR(ptr_reg); reg[3] = GET_WPTR(ptr_reg); netdev_err(netdev, "TX DMA regs: 0x%08x 0x%08x, ptr: %u %u\n", reg[0], reg[1], reg[2], reg[3]); reg[0] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD0_REG); reg[1] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD1_REG); reg[2] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD2_REG); reg[3] = readl(port->dma_base + GMAC_DMA_TX_DESC_WORD3_REG); netdev_err(netdev, "TX DMA descriptor: 0x%08x 0x%08x 0x%08x 0x%08x\n", reg[0], reg[1], reg[2], reg[3]); /* FREE queues status */ ptr_reg = geth->base + GLOBAL_SWFQ_RWPTR_REG; reg[0] = GET_RPTR(ptr_reg); reg[1] = GET_WPTR(ptr_reg); ptr_reg = geth->base + GLOBAL_HWFQ_RWPTR_REG; reg[2] = GET_RPTR(ptr_reg); reg[3] = GET_WPTR(ptr_reg); netdev_err(netdev, "FQ SW ptr: %u %u, HW ptr: %u %u\n", reg[0], reg[1], reg[2], reg[3]); } static void gmac_update_hw_stats(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned int rx_discards, rx_mcast, rx_bcast; struct gemini_ethernet *geth = port->geth; unsigned long flags; spin_lock_irqsave(&geth->irq_lock, 
flags); u64_stats_update_begin(&port->ir_stats_syncp); rx_discards = readl(port->gmac_base + GMAC_IN_DISCARDS); port->hw_stats[0] += rx_discards; port->hw_stats[1] += readl(port->gmac_base + GMAC_IN_ERRORS); rx_mcast = readl(port->gmac_base + GMAC_IN_MCAST); port->hw_stats[2] += rx_mcast; rx_bcast = readl(port->gmac_base + GMAC_IN_BCAST); port->hw_stats[3] += rx_bcast; port->hw_stats[4] += readl(port->gmac_base + GMAC_IN_MAC1); port->hw_stats[5] += readl(port->gmac_base + GMAC_IN_MAC2); port->stats.rx_missed_errors += rx_discards; port->stats.multicast += rx_mcast; port->stats.multicast += rx_bcast; writel(GMAC0_MIB_INT_BIT << (netdev->dev_id * 8), geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); u64_stats_update_end(&port->ir_stats_syncp); spin_unlock_irqrestore(&geth->irq_lock, flags); } /** * gmac_get_intr_flags() - get interrupt status flags for a port from * @netdev: the net device for the port to get flags from * @i: the interrupt status register 0..4 */ static u32 gmac_get_intr_flags(struct net_device *netdev, int i) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct gemini_ethernet *geth = port->geth; void __iomem *irqif_reg, *irqen_reg; unsigned int offs, val; /* Calculate the offset using the stride of the status registers */ offs = i * (GLOBAL_INTERRUPT_STATUS_1_REG - GLOBAL_INTERRUPT_STATUS_0_REG); irqif_reg = geth->base + GLOBAL_INTERRUPT_STATUS_0_REG + offs; irqen_reg = geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG + offs; val = readl(irqif_reg) & readl(irqen_reg); return val; } static enum hrtimer_restart gmac_coalesce_delay_expired(struct hrtimer *timer) { struct gemini_ethernet_port *port = container_of(timer, struct gemini_ethernet_port, rx_coalesce_timer); napi_schedule(&port->napi); return HRTIMER_NORESTART; } static irqreturn_t gmac_irq(int irq, void *data) { struct gemini_ethernet_port *port; struct net_device *netdev = data; struct gemini_ethernet *geth; u32 val, orr = 0; port = netdev_priv(netdev); geth = port->geth; val = gmac_get_intr_flags(netdev, 0); orr |= val; if (val & (GMAC0_IRQ0_2 << (netdev->dev_id * 2))) { /* Oh, crap */ netdev_err(netdev, "hw failure/sw bug\n"); gmac_dump_dma_state(netdev); /* don't know how to recover, just reduce losses */ gmac_enable_irq(netdev, 0); return IRQ_HANDLED; } if (val & (GMAC0_IRQ0_TXQ0_INTS << (netdev->dev_id * 6))) gmac_tx_irq(netdev, 0); val = gmac_get_intr_flags(netdev, 1); orr |= val; if (val & (DEFAULT_Q0_INT_BIT << netdev->dev_id)) { gmac_enable_rx_irq(netdev, 0); if (!port->rx_coalesce_nsecs) { napi_schedule(&port->napi); } else { ktime_t ktime; ktime = ktime_set(0, port->rx_coalesce_nsecs); hrtimer_start(&port->rx_coalesce_timer, ktime, HRTIMER_MODE_REL); } } val = gmac_get_intr_flags(netdev, 4); orr |= val; if (val & (GMAC0_MIB_INT_BIT << (netdev->dev_id * 8))) gmac_update_hw_stats(netdev); if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) { writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8), geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); spin_lock(&geth->irq_lock); u64_stats_update_begin(&port->ir_stats_syncp); ++port->stats.rx_fifo_errors; u64_stats_update_end(&port->ir_stats_syncp); spin_unlock(&geth->irq_lock); } return orr ? 
IRQ_HANDLED : IRQ_NONE; } static void gmac_start_dma(struct gemini_ethernet_port *port) { void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG; union gmac_dma_ctrl dma_ctrl; dma_ctrl.bits32 = readl(dma_ctrl_reg); dma_ctrl.bits.rd_enable = 1; dma_ctrl.bits.td_enable = 1; dma_ctrl.bits.loopback = 0; dma_ctrl.bits.drop_small_ack = 0; dma_ctrl.bits.rd_insert_bytes = NET_IP_ALIGN; dma_ctrl.bits.rd_prot = HPROT_DATA_CACHE | HPROT_PRIVILIGED; dma_ctrl.bits.rd_burst_size = HBURST_INCR8; dma_ctrl.bits.rd_bus = HSIZE_8; dma_ctrl.bits.td_prot = HPROT_DATA_CACHE; dma_ctrl.bits.td_burst_size = HBURST_INCR8; dma_ctrl.bits.td_bus = HSIZE_8; writel(dma_ctrl.bits32, dma_ctrl_reg); } static void gmac_stop_dma(struct gemini_ethernet_port *port) { void __iomem *dma_ctrl_reg = port->dma_base + GMAC_DMA_CTRL_REG; union gmac_dma_ctrl dma_ctrl; dma_ctrl.bits32 = readl(dma_ctrl_reg); dma_ctrl.bits.rd_enable = 0; dma_ctrl.bits.td_enable = 0; writel(dma_ctrl.bits32, dma_ctrl_reg); } static int gmac_open(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); int err; err = request_irq(netdev->irq, gmac_irq, IRQF_SHARED, netdev->name, netdev); if (err) { netdev_err(netdev, "no IRQ\n"); return err; } netif_carrier_off(netdev); phy_start(netdev->phydev); err = geth_resize_freeq(port); /* It's fine if it's just busy, the other port has set up * the freeq in that case. */ if (err && (err != -EBUSY)) { netdev_err(netdev, "could not resize freeq\n"); goto err_stop_phy; } err = gmac_setup_rxq(netdev); if (err) { netdev_err(netdev, "could not setup RXQ\n"); goto err_stop_phy; } err = gmac_setup_txqs(netdev); if (err) { netdev_err(netdev, "could not setup TXQs\n"); gmac_cleanup_rxq(netdev); goto err_stop_phy; } napi_enable(&port->napi); gmac_start_dma(port); gmac_enable_irq(netdev, 1); gmac_enable_tx_rx(netdev); netif_tx_start_all_queues(netdev); hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired; netdev_dbg(netdev, "opened\n"); return 0; err_stop_phy: phy_stop(netdev->phydev); free_irq(netdev->irq, netdev); return err; } static int gmac_stop(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); hrtimer_cancel(&port->rx_coalesce_timer); netif_tx_stop_all_queues(netdev); gmac_disable_tx_rx(netdev); gmac_stop_dma(port); napi_disable(&port->napi); gmac_enable_irq(netdev, 0); gmac_cleanup_rxq(netdev); gmac_cleanup_txqs(netdev); phy_stop(netdev->phydev); free_irq(netdev->irq, netdev); gmac_update_hw_stats(netdev); return 0; } static void gmac_set_rx_mode(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); union gmac_rx_fltr filter = { .bits = { .broadcast = 1, .multicast = 1, .unicast = 1, } }; struct netdev_hw_addr *ha; unsigned int bit_nr; u32 mc_filter[2]; mc_filter[1] = 0; mc_filter[0] = 0; if (netdev->flags & IFF_PROMISC) { filter.bits.error = 1; filter.bits.promiscuous = 1; mc_filter[1] = ~0; mc_filter[0] = ~0; } else if (netdev->flags & IFF_ALLMULTI) { mc_filter[1] = ~0; mc_filter[0] = ~0; } else { netdev_for_each_mc_addr(ha, netdev) { bit_nr = ~crc32_le(~0, ha->addr, ETH_ALEN) & 0x3f; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f); } } writel(mc_filter[0], port->gmac_base + GMAC_MCAST_FIL0); writel(mc_filter[1], port->gmac_base + GMAC_MCAST_FIL1); writel(filter.bits32, port->gmac_base + GMAC_RX_FLTR); } static void gmac_write_mac_address(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); __le32 
addr[3]; memset(addr, 0, sizeof(addr)); memcpy(addr, netdev->dev_addr, ETH_ALEN); writel(le32_to_cpu(addr[0]), port->gmac_base + GMAC_STA_ADD0); writel(le32_to_cpu(addr[1]), port->gmac_base + GMAC_STA_ADD1); writel(le32_to_cpu(addr[2]), port->gmac_base + GMAC_STA_ADD2); } static int gmac_set_mac_address(struct net_device *netdev, void *addr) { struct sockaddr *sa = addr; eth_hw_addr_set(netdev, sa->sa_data); gmac_write_mac_address(netdev); return 0; } static void gmac_clear_hw_stats(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); readl(port->gmac_base + GMAC_IN_DISCARDS); readl(port->gmac_base + GMAC_IN_ERRORS); readl(port->gmac_base + GMAC_IN_MCAST); readl(port->gmac_base + GMAC_IN_BCAST); readl(port->gmac_base + GMAC_IN_MAC1); readl(port->gmac_base + GMAC_IN_MAC2); } static void gmac_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned int start; gmac_update_hw_stats(netdev); /* Racing with RX NAPI */ do { start = u64_stats_fetch_begin(&port->rx_stats_syncp); stats->rx_packets = port->stats.rx_packets; stats->rx_bytes = port->stats.rx_bytes; stats->rx_errors = port->stats.rx_errors; stats->rx_dropped = port->stats.rx_dropped; stats->rx_length_errors = port->stats.rx_length_errors; stats->rx_over_errors = port->stats.rx_over_errors; stats->rx_crc_errors = port->stats.rx_crc_errors; stats->rx_frame_errors = port->stats.rx_frame_errors; } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); /* Racing with MIB and TX completion interrupts */ do { start = u64_stats_fetch_begin(&port->ir_stats_syncp); stats->tx_errors = port->stats.tx_errors; stats->tx_packets = port->stats.tx_packets; stats->tx_bytes = port->stats.tx_bytes; stats->multicast = port->stats.multicast; stats->rx_missed_errors = port->stats.rx_missed_errors; stats->rx_fifo_errors = port->stats.rx_fifo_errors; } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); /* Racing with hard_start_xmit */ do { start = u64_stats_fetch_begin(&port->tx_stats_syncp); stats->tx_dropped = port->stats.tx_dropped; } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); stats->rx_dropped += stats->rx_missed_errors; } static int gmac_change_mtu(struct net_device *netdev, int new_mtu) { int max_len = gmac_pick_rx_max_len(new_mtu); if (max_len < 0) return -EINVAL; gmac_disable_tx_rx(netdev); netdev->mtu = new_mtu; gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT, CONFIG0_MAXLEN_MASK); netdev_update_features(netdev); gmac_enable_tx_rx(netdev); return 0; } static netdev_features_t gmac_fix_features(struct net_device *netdev, netdev_features_t features) { if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK) features &= ~GMAC_OFFLOAD_FEATURES; return features; } static int gmac_set_features(struct net_device *netdev, netdev_features_t features) { struct gemini_ethernet_port *port = netdev_priv(netdev); int enable = features & NETIF_F_RXCSUM; unsigned long flags; u32 reg; spin_lock_irqsave(&port->config_lock, flags); reg = readl(port->gmac_base + GMAC_CONFIG0); reg = enable ? reg | CONFIG0_RX_CHKSUM : reg & ~CONFIG0_RX_CHKSUM; writel(reg, port->gmac_base + GMAC_CONFIG0); spin_unlock_irqrestore(&port->config_lock, flags); return 0; } static int gmac_get_sset_count(struct net_device *netdev, int sset) { return sset == ETH_SS_STATS ? 
GMAC_STATS_NUM : 0; } static void gmac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { if (stringset != ETH_SS_STATS) return; memcpy(data, gmac_stats_strings, sizeof(gmac_stats_strings)); } static void gmac_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *estats, u64 *values) { struct gemini_ethernet_port *port = netdev_priv(netdev); unsigned int start; u64 *p; int i; gmac_update_hw_stats(netdev); /* Racing with MIB interrupt */ do { p = values; start = u64_stats_fetch_begin(&port->ir_stats_syncp); for (i = 0; i < RX_STATS_NUM; i++) *p++ = port->hw_stats[i]; } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start)); values = p; /* Racing with RX NAPI */ do { p = values; start = u64_stats_fetch_begin(&port->rx_stats_syncp); for (i = 0; i < RX_STATUS_NUM; i++) *p++ = port->rx_stats[i]; for (i = 0; i < RX_CHKSUM_NUM; i++) *p++ = port->rx_csum_stats[i]; *p++ = port->rx_napi_exits; } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start)); values = p; /* Racing with TX start_xmit */ do { p = values; start = u64_stats_fetch_begin(&port->tx_stats_syncp); for (i = 0; i < TX_MAX_FRAGS; i++) { *values++ = port->tx_frag_stats[i]; port->tx_frag_stats[i] = 0; } *values++ = port->tx_frags_linearized; *values++ = port->tx_hw_csummed; } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start)); } static int gmac_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { if (!netdev->phydev) return -ENXIO; phy_ethtool_ksettings_get(netdev->phydev, cmd); return 0; } static int gmac_set_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { if (!netdev->phydev) return -ENXIO; return phy_ethtool_ksettings_set(netdev->phydev, cmd); } static int gmac_nway_reset(struct net_device *netdev) { if (!netdev->phydev) return -ENXIO; return phy_start_aneg(netdev->phydev); } static void gmac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pparam) { struct gemini_ethernet_port *port = netdev_priv(netdev); union gmac_config0 config0; config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0); pparam->rx_pause = config0.bits.rx_fc_en; pparam->tx_pause = config0.bits.tx_fc_en; pparam->autoneg = true; } static void gmac_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *rp, struct kernel_ethtool_ringparam *kernel_rp, struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); readl(port->gmac_base + GMAC_CONFIG0); rp->rx_max_pending = 1 << 15; rp->rx_mini_max_pending = 0; rp->rx_jumbo_max_pending = 0; rp->tx_max_pending = 1 << 15; rp->rx_pending = 1 << port->rxq_order; rp->rx_mini_pending = 0; rp->rx_jumbo_pending = 0; rp->tx_pending = 1 << port->txq_order; } static int gmac_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *rp, struct kernel_ethtool_ringparam *kernel_rp, struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); int err = 0; if (netif_running(netdev)) return -EBUSY; if (rp->rx_pending) { port->rxq_order = min(15, ilog2(rp->rx_pending - 1) + 1); err = geth_resize_freeq(port); } if (rp->tx_pending) { port->txq_order = min(15, ilog2(rp->tx_pending - 1) + 1); port->irq_every_tx_packets = 1 << (port->txq_order - 2); } return err; } static int gmac_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); ecmd->rx_max_coalesced_frames = 1; 
ecmd->tx_max_coalesced_frames = port->irq_every_tx_packets; ecmd->rx_coalesce_usecs = port->rx_coalesce_nsecs / 1000; return 0; } static int gmac_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); if (ecmd->tx_max_coalesced_frames < 1) return -EINVAL; if (ecmd->tx_max_coalesced_frames >= 1 << port->txq_order) return -EINVAL; port->irq_every_tx_packets = ecmd->tx_max_coalesced_frames; port->rx_coalesce_nsecs = ecmd->rx_coalesce_usecs * 1000; return 0; } static u32 gmac_get_msglevel(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); return port->msg_enable; } static void gmac_set_msglevel(struct net_device *netdev, u32 level) { struct gemini_ethernet_port *port = netdev_priv(netdev); port->msg_enable = level; } static void gmac_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { strcpy(info->driver, DRV_NAME); strcpy(info->bus_info, netdev->dev_id ? "1" : "0"); } static const struct net_device_ops gmac_351x_ops = { .ndo_init = gmac_init, .ndo_open = gmac_open, .ndo_stop = gmac_stop, .ndo_start_xmit = gmac_start_xmit, .ndo_tx_timeout = gmac_tx_timeout, .ndo_set_rx_mode = gmac_set_rx_mode, .ndo_set_mac_address = gmac_set_mac_address, .ndo_get_stats64 = gmac_get_stats64, .ndo_change_mtu = gmac_change_mtu, .ndo_fix_features = gmac_fix_features, .ndo_set_features = gmac_set_features, }; static const struct ethtool_ops gmac_351x_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .get_sset_count = gmac_get_sset_count, .get_strings = gmac_get_strings, .get_ethtool_stats = gmac_get_ethtool_stats, .get_link = ethtool_op_get_link, .get_link_ksettings = gmac_get_ksettings, .set_link_ksettings = gmac_set_ksettings, .nway_reset = gmac_nway_reset, .get_pauseparam = gmac_get_pauseparam, .get_ringparam = gmac_get_ringparam, .set_ringparam = gmac_set_ringparam, .get_coalesce = gmac_get_coalesce, .set_coalesce = gmac_set_coalesce, .get_msglevel = gmac_get_msglevel, .set_msglevel = gmac_set_msglevel, .get_drvinfo = gmac_get_drvinfo, }; static irqreturn_t gemini_port_irq_thread(int irq, void *data) { unsigned long irqmask = SWFQ_EMPTY_INT_BIT; struct gemini_ethernet_port *port = data; struct gemini_ethernet *geth; unsigned long flags; geth = port->geth; /* The queue is half empty so refill it */ geth_fill_freeq(geth, true); spin_lock_irqsave(&geth->irq_lock, flags); /* ACK queue interrupt */ writel(irqmask, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); /* Enable queue interrupt again */ irqmask |= readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); writel(irqmask, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); spin_unlock_irqrestore(&geth->irq_lock, flags); return IRQ_HANDLED; } static irqreturn_t gemini_port_irq(int irq, void *data) { struct gemini_ethernet_port *port = data; struct gemini_ethernet *geth; irqreturn_t ret = IRQ_NONE; u32 val, en; geth = port->geth; spin_lock(&geth->irq_lock); val = readl(geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); en = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); if (val & en & SWFQ_EMPTY_INT_BIT) { /* Disable the queue empty interrupt while we work on * processing the queue. Also disable overrun interrupts * as there is not much we can do about it here. 
*/ en &= ~(SWFQ_EMPTY_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT | GMAC1_RX_OVERRUN_INT_BIT); writel(en, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); ret = IRQ_WAKE_THREAD; } spin_unlock(&geth->irq_lock); return ret; } static void gemini_port_remove(struct gemini_ethernet_port *port) { if (port->netdev) { phy_disconnect(port->netdev->phydev); unregister_netdev(port->netdev); } clk_disable_unprepare(port->pclk); geth_cleanup_freeq(port->geth); } static void gemini_ethernet_init(struct gemini_ethernet *geth) { /* Only do this once both ports are online */ if (geth->initialized) return; if (geth->port0 && geth->port1) geth->initialized = true; else return; writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_1_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_2_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_3_REG); writel(0, geth->base + GLOBAL_INTERRUPT_ENABLE_4_REG); /* Interrupt config: * * GMAC0 intr bits ------> int0 ----> eth0 * GMAC1 intr bits ------> int1 ----> eth1 * TOE intr -------------> int1 ----> eth1 * Classification Intr --> int0 ----> eth0 * Default Q0 -----------> int0 ----> eth0 * Default Q1 -----------> int1 ----> eth1 * FreeQ intr -----------> int1 ----> eth1 */ writel(0xCCFC0FC0, geth->base + GLOBAL_INTERRUPT_SELECT_0_REG); writel(0x00F00002, geth->base + GLOBAL_INTERRUPT_SELECT_1_REG); writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_2_REG); writel(0xFFFFFFFF, geth->base + GLOBAL_INTERRUPT_SELECT_3_REG); writel(0xFF000003, geth->base + GLOBAL_INTERRUPT_SELECT_4_REG); /* edge-triggered interrupts packed to level-triggered one... */ writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_0_REG); writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG); writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_2_REG); writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_3_REG); writel(~0, geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); /* Set up queue */ writel(0, geth->base + GLOBAL_SW_FREEQ_BASE_SIZE_REG); writel(0, geth->base + GLOBAL_HW_FREEQ_BASE_SIZE_REG); writel(0, geth->base + GLOBAL_SWFQ_RWPTR_REG); writel(0, geth->base + GLOBAL_HWFQ_RWPTR_REG); geth->freeq_frag_order = DEFAULT_RX_BUF_ORDER; /* This makes the queue resize on probe() so that we * set up and enable the queue IRQ. FIXME: fragile. 
*/ geth->freeq_order = 1; } static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port) { port->mac_addr[0] = cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD0)); port->mac_addr[1] = cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD1)); port->mac_addr[2] = cpu_to_le32(readl(port->gmac_base + GMAC_STA_ADD2)); } static int gemini_ethernet_port_probe(struct platform_device *pdev) { char *port_names[2] = { "ethernet0", "ethernet1" }; struct device_node *np = pdev->dev.of_node; struct gemini_ethernet_port *port; struct device *dev = &pdev->dev; struct gemini_ethernet *geth; struct net_device *netdev; struct device *parent; u8 mac[ETH_ALEN]; unsigned int id; int irq; int ret; parent = dev->parent; geth = dev_get_drvdata(parent); if (!strcmp(dev_name(dev), "60008000.ethernet-port")) id = 0; else if (!strcmp(dev_name(dev), "6000c000.ethernet-port")) id = 1; else return -ENODEV; dev_info(dev, "probe %s ID %d\n", dev_name(dev), id); netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM); if (!netdev) { dev_err(dev, "Can't allocate ethernet device #%d\n", id); return -ENOMEM; } port = netdev_priv(netdev); SET_NETDEV_DEV(netdev, dev); port->netdev = netdev; port->id = id; port->geth = geth; port->dev = dev; port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); /* DMA memory */ port->dma_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(port->dma_base)) { dev_err(dev, "get DMA address failed\n"); return PTR_ERR(port->dma_base); } /* GMAC config memory */ port->gmac_base = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); if (IS_ERR(port->gmac_base)) { dev_err(dev, "get GMAC address failed\n"); return PTR_ERR(port->gmac_base); } /* Interrupt */ irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; port->irq = irq; /* Clock the port */ port->pclk = devm_clk_get(dev, "PCLK"); if (IS_ERR(port->pclk)) { dev_err(dev, "no PCLK\n"); return PTR_ERR(port->pclk); } ret = clk_prepare_enable(port->pclk); if (ret) return ret; /* Maybe there is a nice ethernet address we should use */ gemini_port_save_mac_addr(port); /* Reset the port */ port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); ret = PTR_ERR(port->reset); goto unprepare; } reset_control_reset(port->reset); usleep_range(100, 500); /* Assign pointer in the main state container */ if (!id) geth->port0 = port; else geth->port1 = port; /* This will just be done once both ports are up and reset */ gemini_ethernet_init(geth); platform_set_drvdata(pdev, port); /* Set up and register the netdev */ netdev->dev_id = port->id; netdev->irq = irq; netdev->netdev_ops = &gmac_351x_ops; netdev->ethtool_ops = &gmac_351x_ethtool_ops; spin_lock_init(&port->config_lock); gmac_clear_hw_stats(netdev); netdev->hw_features = GMAC_OFFLOAD_FEATURES; netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO; /* We can handle jumbo frames up to 10236 bytes so, let's accept * payloads of 10236 bytes minus VLAN and ethernet header */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = 10236 - VLAN_ETH_HLEN; port->freeq_refill = 0; netif_napi_add(netdev, &port->napi, gmac_napi_poll); ret = of_get_mac_address(np, mac); if (!ret) { dev_info(dev, "Setting macaddr from DT %pM\n", mac); memcpy(port->mac_addr, mac, ETH_ALEN); } if (is_valid_ether_addr((void *)port->mac_addr)) { eth_hw_addr_set(netdev, (u8 *)port->mac_addr); } else { dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n", port->mac_addr[0], port->mac_addr[1], port->mac_addr[2]); dev_info(dev, 
"using a random ethernet address\n"); eth_hw_addr_random(netdev); } gmac_write_mac_address(netdev); ret = devm_request_threaded_irq(port->dev, port->irq, gemini_port_irq, gemini_port_irq_thread, IRQF_SHARED, port_names[port->id], port); if (ret) goto unprepare; ret = gmac_setup_phy(netdev); if (ret) { netdev_err(netdev, "PHY init failed\n"); goto unprepare; } ret = register_netdev(netdev); if (ret) goto unprepare; return 0; unprepare: clk_disable_unprepare(port->pclk); return ret; } static int gemini_ethernet_port_remove(struct platform_device *pdev) { struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); return 0; } static const struct of_device_id gemini_ethernet_port_of_match[] = { { .compatible = "cortina,gemini-ethernet-port", }, {}, }; MODULE_DEVICE_TABLE(of, gemini_ethernet_port_of_match); static struct platform_driver gemini_ethernet_port_driver = { .driver = { .name = "gemini-ethernet-port", .of_match_table = gemini_ethernet_port_of_match, }, .probe = gemini_ethernet_port_probe, .remove = gemini_ethernet_port_remove, }; static int gemini_ethernet_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct gemini_ethernet *geth; unsigned int retry = 5; u32 val; /* Global registers */ geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL); if (!geth) return -ENOMEM; geth->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(geth->base)) return PTR_ERR(geth->base); geth->dev = dev; /* Wait for ports to stabilize */ do { udelay(2); val = readl(geth->base + GLOBAL_TOE_VERSION_REG); barrier(); } while (!val && --retry); if (!retry) { dev_err(dev, "failed to reset ethernet\n"); return -EIO; } dev_info(dev, "Ethernet device ID: 0x%03x, revision 0x%01x\n", (val >> 4) & 0xFFFU, val & 0xFU); spin_lock_init(&geth->irq_lock); spin_lock_init(&geth->freeq_lock); /* The children will use this */ platform_set_drvdata(pdev, geth); /* Spawn child devices for the two ports */ return devm_of_platform_populate(dev); } static int gemini_ethernet_remove(struct platform_device *pdev) { struct gemini_ethernet *geth = platform_get_drvdata(pdev); geth_cleanup_freeq(geth); geth->initialized = false; return 0; } static const struct of_device_id gemini_ethernet_of_match[] = { { .compatible = "cortina,gemini-ethernet", }, {}, }; MODULE_DEVICE_TABLE(of, gemini_ethernet_of_match); static struct platform_driver gemini_ethernet_driver = { .driver = { .name = DRV_NAME, .of_match_table = gemini_ethernet_of_match, }, .probe = gemini_ethernet_probe, .remove = gemini_ethernet_remove, }; static int __init gemini_ethernet_module_init(void) { int ret; ret = platform_driver_register(&gemini_ethernet_port_driver); if (ret) return ret; ret = platform_driver_register(&gemini_ethernet_driver); if (ret) { platform_driver_unregister(&gemini_ethernet_port_driver); return ret; } return 0; } module_init(gemini_ethernet_module_init); static void __exit gemini_ethernet_module_exit(void) { platform_driver_unregister(&gemini_ethernet_driver); platform_driver_unregister(&gemini_ethernet_port_driver); } module_exit(gemini_ethernet_module_exit); MODULE_AUTHOR("Linus Walleij <[email protected]>"); MODULE_DESCRIPTION("StorLink SL351x (Gemini) ethernet driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME);
linux-master
drivers/net/ethernet/cortina/gemini.c
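Editorial aside, not part of the driver sources above or below: gmac_get_ethtool_stats() in the Gemini driver snapshots 64-bit counters that race with the MIB interrupt, RX NAPI and start_xmit by bracketing each copy with u64_stats_fetch_begin()/u64_stats_fetch_retry() and retrying until the sequence count is stable. The minimal userspace sketch below shows the same retry pattern with C11 atomics; the stats_seq type and the stats_read_*() helpers are hypothetical names, not kernel APIs, and the writer side (bump the sequence to an odd value before updating, back to even afterwards) is omitted for brevity.

/*
 * Editorial sketch: a userspace analogue of the seqcount-style reader
 * retry loop used by gmac_get_ethtool_stats().  Hypothetical names.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats_seq {
    _Atomic unsigned int seq;   /* odd while a writer is mid-update */
    uint64_t counters[4];
};

static unsigned int stats_read_begin(struct stats_seq *s)
{
    unsigned int seq;

    /* Wait until no writer holds the sequence (even value). */
    while ((seq = atomic_load_explicit(&s->seq, memory_order_acquire)) & 1)
        ;
    return seq;
}

static int stats_read_retry(struct stats_seq *s, unsigned int start)
{
    atomic_thread_fence(memory_order_acquire);
    return atomic_load_explicit(&s->seq, memory_order_relaxed) != start;
}

int main(void)
{
    struct stats_seq s = { .counters = { 1, 2, 3, 4 } };
    uint64_t snap[4];
    unsigned int start;
    int i;

    /* Reader side: copy the counters, retry if a writer intervened. */
    do {
        start = stats_read_begin(&s);
        for (i = 0; i < 4; i++)
            snap[i] = s.counters[i];
    } while (stats_read_retry(&s, start));

    for (i = 0; i < 4; i++)
        printf("counter[%d] = %llu\n", i, (unsigned long long)snap[i]);
    return 0;
}

A real writer would pair this with the equivalent of u64_stats_update_begin()/u64_stats_update_end() around every counter update, which is exactly what the driver's NAPI and transmit paths do.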
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) #include <linux/bitmap.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/nvme.h> #include <linux/pci.h> #include <linux/wait.h> #include <linux/sched/signal.h> #include "fun_queue.h" #include "fun_dev.h" #define FUN_ADMIN_CMD_TO_MS 3000 enum { AQA_ASQS_SHIFT = 0, AQA_ACQS_SHIFT = 16, AQA_MIN_QUEUE_SIZE = 2, AQA_MAX_QUEUE_SIZE = 4096 }; /* context for admin commands */ struct fun_cmd_ctx { fun_admin_callback_t cb; /* callback to invoke on completion */ void *cb_data; /* user data provided to callback */ int cpu; /* CPU where the cmd's tag was allocated */ }; /* Context for synchronous admin commands. */ struct fun_sync_cmd_ctx { struct completion compl; u8 *rsp_buf; /* caller provided response buffer */ unsigned int rsp_len; /* response buffer size */ u8 rsp_status; /* command response status */ }; /* Wait for the CSTS.RDY bit to match @enabled. */ static int fun_wait_ready(struct fun_dev *fdev, bool enabled) { unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg); u32 bit = enabled ? NVME_CSTS_RDY : 0; unsigned long deadline; deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500ms */ for (;;) { u32 csts = readl(fdev->bar + NVME_REG_CSTS); if (csts == ~0) { dev_err(fdev->dev, "CSTS register read %#x\n", csts); return -EIO; } if ((csts & NVME_CSTS_RDY) == bit) return 0; if (time_is_before_jiffies(deadline)) break; msleep(100); } dev_err(fdev->dev, "Timed out waiting for device to indicate RDY %u; aborting %s\n", enabled, enabled ? "initialization" : "reset"); return -ETIMEDOUT; } /* Check CSTS and return an error if it is unreadable or has unexpected * RDY value. */ static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy) { u32 csts = readl(fdev->bar + NVME_REG_CSTS); u32 actual_rdy = csts & NVME_CSTS_RDY; if (csts == ~0) { dev_err(fdev->dev, "CSTS register read %#x\n", csts); return -EIO; } if (actual_rdy != expected_rdy) { dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy); return -EINVAL; } return 0; } /* Check that CSTS RDY has the expected value. Then write a new value to the CC * register and wait for CSTS RDY to match the new CC ENABLE state. 
*/ static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy) { int rc = fun_check_csts_rdy(fdev, initial_rdy); if (rc) return rc; writel(fdev->cc_reg, fdev->bar + NVME_REG_CC); return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE)); } static int fun_disable_ctrl(struct fun_dev *fdev) { fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE); return fun_update_cc_enable(fdev, 1); } static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2, u32 admin_sqesz_log2) { fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) | (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) | ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) | NVME_CC_ENABLE; return fun_update_cc_enable(fdev, 0); } static int fun_map_bars(struct fun_dev *fdev, const char *name) { struct pci_dev *pdev = to_pci_dev(fdev->dev); int err; err = pci_request_mem_regions(pdev, name); if (err) { dev_err(&pdev->dev, "Couldn't get PCI memory resources, err %d\n", err); return err; } fdev->bar = pci_ioremap_bar(pdev, 0); if (!fdev->bar) { dev_err(&pdev->dev, "Couldn't map BAR 0\n"); pci_release_mem_regions(pdev); return -ENOMEM; } return 0; } static void fun_unmap_bars(struct fun_dev *fdev) { struct pci_dev *pdev = to_pci_dev(fdev->dev); if (fdev->bar) { iounmap(fdev->bar); fdev->bar = NULL; pci_release_mem_regions(pdev); } } static int fun_set_dma_masks(struct device *dev) { int err; err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (err) dev_err(dev, "DMA mask configuration failed, err %d\n", err); return err; } static irqreturn_t fun_admin_irq(int irq, void *data) { struct fun_queue *funq = data; return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE; } static void fun_complete_admin_cmd(struct fun_queue *funq, void *data, void *entry, const struct fun_cqe_info *info) { const struct fun_admin_rsp_common *rsp_common = entry; struct fun_dev *fdev = funq->fdev; struct fun_cmd_ctx *cmd_ctx; int cpu; u16 cid; if (info->sqhd == cpu_to_be16(0xffff)) { dev_dbg(fdev->dev, "adminq event"); if (fdev->adminq_cb) fdev->adminq_cb(fdev, entry); return; } cid = be16_to_cpu(rsp_common->cid); dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid, rsp_common->op, rsp_common->ret); cmd_ctx = &fdev->cmd_ctx[cid]; if (cmd_ctx->cpu < 0) { dev_err(fdev->dev, "admin CQE with CID=%u, op=%u does not match a pending command\n", cid, rsp_common->op); return; } if (cmd_ctx->cb) cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL)); cpu = cmd_ctx->cpu; cmd_ctx->cpu = -1; sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu); } static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags) { unsigned int i; fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL); if (!fdev->cmd_ctx) return -ENOMEM; for (i = 0; i < ntags; i++) fdev->cmd_ctx[i].cpu = -1; return 0; } /* Allocate and enable an admin queue and assign it the first IRQ vector. 
*/ static int fun_enable_admin_queue(struct fun_dev *fdev, const struct fun_dev_params *areq) { struct fun_queue_alloc_req qreq = { .cqe_size_log2 = areq->cqe_size_log2, .sqe_size_log2 = areq->sqe_size_log2, .cq_depth = areq->cq_depth, .sq_depth = areq->sq_depth, .rq_depth = areq->rq_depth, }; unsigned int ntags = areq->sq_depth - 1; struct fun_queue *funq; int rc; if (fdev->admin_q) return -EEXIST; if (areq->sq_depth < AQA_MIN_QUEUE_SIZE || areq->sq_depth > AQA_MAX_QUEUE_SIZE || areq->cq_depth < AQA_MIN_QUEUE_SIZE || areq->cq_depth > AQA_MAX_QUEUE_SIZE) return -EINVAL; fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq); if (!fdev->admin_q) return -ENOMEM; rc = fun_init_cmd_ctx(fdev, ntags); if (rc) goto free_q; rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false, GFP_KERNEL, dev_to_node(fdev->dev)); if (rc) goto free_cmd_ctx; funq = fdev->admin_q; funq->cq_vector = 0; rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq); if (rc) goto free_sbq; fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL); fdev->adminq_cb = areq->event_cb; writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT | (funq->cq_depth - 1) << AQA_ACQS_SHIFT, fdev->bar + NVME_REG_AQA); writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ); writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ); rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2); if (rc) goto free_irq; if (areq->rq_depth) { rc = fun_create_rq(funq); if (rc) goto disable_ctrl; funq_rq_post(funq); } return 0; disable_ctrl: fun_disable_ctrl(fdev); free_irq: fun_free_irq(funq); free_sbq: sbitmap_queue_free(&fdev->admin_sbq); free_cmd_ctx: kvfree(fdev->cmd_ctx); fdev->cmd_ctx = NULL; free_q: fun_free_queue(fdev->admin_q); fdev->admin_q = NULL; return rc; } static void fun_disable_admin_queue(struct fun_dev *fdev) { struct fun_queue *admq = fdev->admin_q; if (!admq) return; fun_disable_ctrl(fdev); fun_free_irq(admq); __fun_process_cq(admq, 0); sbitmap_queue_free(&fdev->admin_sbq); kvfree(fdev->cmd_ctx); fdev->cmd_ctx = NULL; fun_free_queue(admq); fdev->admin_q = NULL; } /* Return %true if the admin queue has stopped servicing commands as can be * detected through registers. This isn't exhaustive and may provide false * negatives. */ static bool fun_adminq_stopped(struct fun_dev *fdev) { u32 csts = readl(fdev->bar + NVME_REG_CSTS); return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY; } static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup) { struct sbitmap_queue *sbq = &fdev->admin_sbq; struct sbq_wait_state *ws = &sbq->ws[0]; DEFINE_SBQ_WAIT(wait); int tag; for (;;) { sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE); if (fdev->suppress_cmds) { tag = -ESHUTDOWN; break; } tag = sbitmap_queue_get(sbq, cpup); if (tag >= 0) break; schedule(); } sbitmap_finish_wait(sbq, ws, &wait); return tag; } /* Submit an asynchronous admin command. Caller is responsible for implementing * any waiting or timeout. Upon command completion the callback @cb is called. 
*/ int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd, fun_admin_callback_t cb, void *cb_data, bool wait_ok) { struct fun_queue *funq = fdev->admin_q; unsigned int cmdsize = cmd->len8 * 8; struct fun_cmd_ctx *cmd_ctx; int tag, cpu, rc = 0; if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2))) return -EMSGSIZE; tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu); if (tag < 0) { if (!wait_ok) return -EAGAIN; tag = fun_wait_for_tag(fdev, &cpu); if (tag < 0) return tag; } cmd->cid = cpu_to_be16(tag); cmd_ctx = &fdev->cmd_ctx[tag]; cmd_ctx->cb = cb; cmd_ctx->cb_data = cb_data; spin_lock(&funq->sq_lock); if (unlikely(fdev->suppress_cmds)) { rc = -ESHUTDOWN; sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu); } else { cmd_ctx->cpu = cpu; memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize); dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail, cmd); if (++funq->sq_tail == funq->sq_depth) funq->sq_tail = 0; writel(funq->sq_tail, funq->sq_db); } spin_unlock(&funq->sq_lock); return rc; } /* Abandon a pending admin command by clearing the issuer's callback data. * Failure indicates that the command either has already completed or its * completion is racing with this call. */ static bool fun_abandon_admin_cmd(struct fun_dev *fd, const struct fun_admin_req_common *cmd, void *cb_data) { u16 cid = be16_to_cpu(cmd->cid); struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid]; return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data; } /* Stop submission of new admin commands and wake up any processes waiting for * tags. Already submitted commands are left to complete or time out. */ static void fun_admin_stop(struct fun_dev *fdev) { spin_lock(&fdev->admin_q->sq_lock); fdev->suppress_cmds = true; spin_unlock(&fdev->admin_q->sq_lock); sbitmap_queue_wake_all(&fdev->admin_sbq); } /* The callback for synchronous execution of admin commands. It copies the * command response to the caller's buffer and signals completion. */ static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data) { const struct fun_admin_rsp_common *rsp_common = rsp; struct fun_sync_cmd_ctx *ctx = cb_data; if (!ctx) return; /* command issuer timed out and left */ if (ctx->rsp_buf) { unsigned int rsp_len = rsp_common->len8 * 8; if (unlikely(rsp_len > ctx->rsp_len)) { dev_err(fd->dev, "response for op %u is %uB > response buffer %uB\n", rsp_common->op, rsp_len, ctx->rsp_len); rsp_len = ctx->rsp_len; } memcpy(ctx->rsp_buf, rsp, rsp_len); } ctx->rsp_status = rsp_common->ret; complete(&ctx->compl); } /* Submit a synchronous admin command. */ int fun_submit_admin_sync_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd, void *rsp, size_t rspsize, unsigned int timeout) { struct fun_sync_cmd_ctx ctx = { .compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl), .rsp_buf = rsp, .rsp_len = rspsize, }; unsigned int cmdlen = cmd->len8 * 8; unsigned long jiffies_left; int ret; ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx, true); if (ret) return ret; if (!timeout) timeout = FUN_ADMIN_CMD_TO_MS; jiffies_left = wait_for_completion_timeout(&ctx.compl, msecs_to_jiffies(timeout)); if (!jiffies_left) { /* The command timed out. Attempt to cancel it so we can return. * But if the command is in the process of completing we'll * wait for it. 
*/ if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) { dev_err(fdev->dev, "admin command timed out: %*ph\n", cmdlen, cmd); fun_admin_stop(fdev); /* see if the timeout was due to a queue failure */ if (fun_adminq_stopped(fdev)) dev_err(fdev->dev, "device does not accept admin commands\n"); return -ETIMEDOUT; } wait_for_completion(&ctx.compl); } if (ctx.rsp_status) { dev_err(fdev->dev, "admin command failed, err %d: %*ph\n", ctx.rsp_status, cmdlen, cmd); } return -ctx.rsp_status; } EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd); /* Return the number of device resources of the requested type. */ int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res) { union { struct fun_admin_res_count_req req; struct fun_admin_res_count_rsp rsp; } cmd; int rc; cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req)); cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT, 0, 0); rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp, sizeof(cmd), 0); return rc ? rc : be32_to_cpu(cmd.rsp.count.data); } EXPORT_SYMBOL_GPL(fun_get_res_count); /* Request that the instance of resource @res with the given id be deleted. */ int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res, unsigned int flags, u32 id) { struct fun_admin_generic_destroy_req req = { .common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)), .destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY, flags, id) }; return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0); } EXPORT_SYMBOL_GPL(fun_res_destroy); /* Bind two entities of the given types and IDs. */ int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0, unsigned int id0, enum fun_admin_bind_type type1, unsigned int id1) { struct { struct fun_admin_bind_req req; struct fun_admin_bind_entry entry[2]; } cmd = { .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND, sizeof(cmd)), .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0), .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1), }; return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0); } EXPORT_SYMBOL_GPL(fun_bind); static int fun_get_dev_limits(struct fun_dev *fdev) { struct pci_dev *pdev = to_pci_dev(fdev->dev); unsigned int cq_count, sq_count, num_dbs; int rc; rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ); if (rc < 0) return rc; cq_count = rc; rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ); if (rc < 0) return rc; sq_count = rc; /* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the * device must provide additional queues. */ if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth) return -EINVAL; /* Calculate the max QID based on SQ/CQ/doorbell counts. * SQ/CQ doorbells alternate. */ num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >> (2 + NVME_CAP_STRIDE(fdev->cap_reg)); fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1; fdev->kern_end_qid = fdev->max_qid + 1; return 0; } /* Allocate all MSI-X vectors available on a function and at least @min_vecs. */ static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs) { int vecs, num_msix = pci_msix_vec_count(pdev); if (num_msix < 0) return num_msix; if (min_vecs > num_msix) return -ERANGE; vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX); if (vecs > 0) { dev_info(&pdev->dev, "Allocated %d IRQ vectors of %d requested\n", vecs, num_msix); } else { dev_err(&pdev->dev, "Unable to allocate at least %u IRQ vectors\n", min_vecs); } return vecs; } /* Allocate and initialize the IRQ manager state. 
*/ static int fun_alloc_irq_mgr(struct fun_dev *fdev) { fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL); if (!fdev->irq_map) return -ENOMEM; spin_lock_init(&fdev->irqmgr_lock); /* mark IRQ 0 allocated, it is used by the admin queue */ __set_bit(0, fdev->irq_map); fdev->irqs_avail = fdev->num_irqs - 1; return 0; } /* Reserve @nirqs of the currently available IRQs and return their indices. */ int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices) { unsigned int b, n = 0; int err = -ENOSPC; if (!nirqs) return 0; spin_lock(&fdev->irqmgr_lock); if (nirqs > fdev->irqs_avail) goto unlock; for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) { __set_bit(b, fdev->irq_map); irq_indices[n++] = b; if (n >= nirqs) break; } WARN_ON(n < nirqs); fdev->irqs_avail -= n; err = n; unlock: spin_unlock(&fdev->irqmgr_lock); return err; } EXPORT_SYMBOL(fun_reserve_irqs); /* Release @nirqs previously allocated IRQS with the supplied indices. */ void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices) { unsigned int i; spin_lock(&fdev->irqmgr_lock); for (i = 0; i < nirqs; i++) __clear_bit(irq_indices[i], fdev->irq_map); fdev->irqs_avail += nirqs; spin_unlock(&fdev->irqmgr_lock); } EXPORT_SYMBOL(fun_release_irqs); static void fun_serv_handler(struct work_struct *work) { struct fun_dev *fd = container_of(work, struct fun_dev, service_task); if (test_bit(FUN_SERV_DISABLED, &fd->service_flags)) return; if (fd->serv_cb) fd->serv_cb(fd); } void fun_serv_stop(struct fun_dev *fd) { set_bit(FUN_SERV_DISABLED, &fd->service_flags); cancel_work_sync(&fd->service_task); } EXPORT_SYMBOL_GPL(fun_serv_stop); void fun_serv_restart(struct fun_dev *fd) { clear_bit(FUN_SERV_DISABLED, &fd->service_flags); if (fd->service_flags) schedule_work(&fd->service_task); } EXPORT_SYMBOL_GPL(fun_serv_restart); void fun_serv_sched(struct fun_dev *fd) { if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags)) schedule_work(&fd->service_task); } EXPORT_SYMBOL_GPL(fun_serv_sched); /* Check and try to get the device into a proper state for initialization, * i.e., CSTS.RDY = CC.EN = 0. */ static int sanitize_dev(struct fun_dev *fdev) { int rc; fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP); fdev->cc_reg = readl(fdev->bar + NVME_REG_CC); /* First get RDY to agree with the current EN. Give RDY the opportunity * to complete a potential recent EN change. */ rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE); if (rc) return rc; /* Next, reset the device if EN is currently 1. */ if (fdev->cc_reg & NVME_CC_ENABLE) rc = fun_disable_ctrl(fdev); return rc; } /* Undo the device initialization of fun_dev_enable(). */ void fun_dev_disable(struct fun_dev *fdev) { struct pci_dev *pdev = to_pci_dev(fdev->dev); pci_set_drvdata(pdev, NULL); if (fdev->fw_handle != FUN_HCI_ID_INVALID) { fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0, fdev->fw_handle); fdev->fw_handle = FUN_HCI_ID_INVALID; } fun_disable_admin_queue(fdev); bitmap_free(fdev->irq_map); pci_free_irq_vectors(pdev); pci_disable_device(pdev); fun_unmap_bars(fdev); } EXPORT_SYMBOL(fun_dev_disable); /* Perform basic initialization of a device, including * - PCI config space setup and BAR0 mapping * - interrupt management initialization * - 1 admin queue setup * - determination of some device limits, such as number of queues. 
*/ int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev, const struct fun_dev_params *areq, const char *name) { int rc; fdev->dev = &pdev->dev; rc = fun_map_bars(fdev, name); if (rc) return rc; rc = fun_set_dma_masks(fdev->dev); if (rc) goto unmap; rc = pci_enable_device_mem(pdev); if (rc) { dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc); goto unmap; } rc = sanitize_dev(fdev); if (rc) goto disable_dev; fdev->fw_handle = FUN_HCI_ID_INVALID; fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1; fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg); fdev->dbs = fdev->bar + NVME_REG_DBS; INIT_WORK(&fdev->service_task, fun_serv_handler); fdev->service_flags = FUN_SERV_DISABLED; fdev->serv_cb = areq->serv_cb; rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for admin CQ */ if (rc < 0) goto disable_dev; fdev->num_irqs = rc; rc = fun_alloc_irq_mgr(fdev); if (rc) goto free_irqs; pci_set_master(pdev); rc = fun_enable_admin_queue(fdev, areq); if (rc) goto free_irq_mgr; rc = fun_get_dev_limits(fdev); if (rc < 0) goto disable_admin; pci_save_state(pdev); pci_set_drvdata(pdev, fdev); pcie_print_link_status(pdev); dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n", fdev->q_depth, fdev->db_stride, fdev->max_qid, fdev->kern_end_qid); return 0; disable_admin: fun_disable_admin_queue(fdev); free_irq_mgr: bitmap_free(fdev->irq_map); free_irqs: pci_free_irq_vectors(pdev); disable_dev: pci_disable_device(pdev); unmap: fun_unmap_bars(fdev); return rc; } EXPORT_SYMBOL(fun_dev_enable); MODULE_AUTHOR("Dimitris Michailidis <[email protected]>"); MODULE_DESCRIPTION("Core services driver for Fungible devices"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
drivers/net/ethernet/fungible/funcore/fun_dev.c
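Editorial aside, not part of fun_dev.c: fun_submit_admin_cmd() above allocates a tag from an sbitmap queue, parks the completion callback in cmd_ctx[tag], and places the tag in the command's CID; fun_complete_admin_cmd() later finds the context by CID, and fun_abandon_admin_cmd() cancels a timed-out command by cmpxchg()-ing the callback data so that cancellation and completion cannot both claim it. The toy below models only that tag/callback bookkeeping in plain C11; every name in it (adminq_get_tag(), MAX_TAGS, and so on) is illustrative, and the linear-scan allocator merely stands in for the driver's sbitmap.

/*
 * Editorial sketch: tag-indexed command contexts with an atomic
 * "abandon vs. complete" race, loosely modelled on fun_dev.c.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_TAGS 8

typedef void (*cmd_cb_t)(void *data);

struct cmd_ctx {
    cmd_cb_t cb;
    _Atomic(void *) cb_data;
    bool in_use;
};

static struct cmd_ctx ctx_table[MAX_TAGS];

/* Linear-scan tag allocator standing in for the driver's sbitmap queue. */
static int adminq_get_tag(cmd_cb_t cb, void *cb_data)
{
    for (int tag = 0; tag < MAX_TAGS; tag++) {
        if (!ctx_table[tag].in_use) {
            ctx_table[tag].in_use = true;
            ctx_table[tag].cb = cb;
            atomic_store(&ctx_table[tag].cb_data, cb_data);
            return tag;
        }
    }
    return -1;  /* caller would block for a tag or return -EAGAIN */
}

/* Completion path: look up the context by tag and fire its callback once. */
static void adminq_complete(int tag)
{
    struct cmd_ctx *c = &ctx_table[tag];
    void *data = atomic_exchange(&c->cb_data, NULL);

    if (c->cb)
        c->cb(data);
    c->in_use = false;
}

/* Abandon: succeeds only if completion has not already consumed cb_data. */
static bool adminq_abandon(int tag, void *expected)
{
    return atomic_compare_exchange_strong(&ctx_table[tag].cb_data,
                                          &expected, NULL);
}

static void print_done(void *data)
{
    printf("command done, data=%s\n", data ? (char *)data : "(abandoned)");
}

int main(void)
{
    char payload[] = "hello";
    int tag = adminq_get_tag(print_done, payload);

    if (tag < 0)
        return EXIT_FAILURE;
    if (!adminq_abandon(tag, payload))
        printf("too late to abandon\n");
    adminq_complete(tag);   /* callback sees NULL data, as in the driver */
    return 0;
}

The one-way transfer of cb_data is the whole point: whichever side wins the atomic swap owns the caller's context, which is why the driver's synchronous submitter can safely return -ETIMEDOUT after a successful abandon.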
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/log2.h> #include <linux/mm.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/slab.h> #include "fun_dev.h" #include "fun_queue.h" /* Allocate memory for a queue. This includes the memory for the HW descriptor * ring, an optional 64b HW write-back area, and an optional SW state ring. * Returns the virtual and DMA addresses of the HW ring, the VA of the SW ring, * and the VA of the write-back area. */ void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz, size_t sw_desc_sz, bool wb, int numa_node, dma_addr_t *dma_addr, void **sw_va, volatile __be64 **wb_va) { int dev_node = dev_to_node(dma_dev); size_t dma_sz; void *va; if (numa_node == NUMA_NO_NODE) numa_node = dev_node; /* Place optional write-back area at end of descriptor ring. */ dma_sz = hw_desc_sz * depth; if (wb) dma_sz += sizeof(u64); set_dev_node(dma_dev, numa_node); va = dma_alloc_coherent(dma_dev, dma_sz, dma_addr, GFP_KERNEL); set_dev_node(dma_dev, dev_node); if (!va) return NULL; if (sw_desc_sz) { *sw_va = kvzalloc_node(sw_desc_sz * depth, GFP_KERNEL, numa_node); if (!*sw_va) { dma_free_coherent(dma_dev, dma_sz, va, *dma_addr); return NULL; } } if (wb) *wb_va = va + dma_sz - sizeof(u64); return va; } EXPORT_SYMBOL_GPL(fun_alloc_ring_mem); void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz, bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va) { if (hw_va) { size_t sz = depth * hw_desc_sz; if (wb) sz += sizeof(u64); dma_free_coherent(dma_dev, sz, hw_va, dma_addr); } kvfree(sw_va); } EXPORT_SYMBOL_GPL(fun_free_ring_mem); /* Prepare and issue an admin command to create an SQ on the device with the * provided parameters. If the queue ID is auto-allocated by the device it is * returned in *sqidp. */ int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid, u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr, u8 coal_nentries, u8 coal_usec, u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp) { union { struct fun_admin_epsq_req req; struct fun_admin_generic_create_rsp rsp; } cmd; dma_addr_t wb_addr; u32 hw_qid; int rc; if (sq_depth > fdev->q_depth) return -EINVAL; if (flags & FUN_ADMIN_EPSQ_CREATE_FLAG_RQ) sqe_size_log2 = ilog2(sizeof(struct fun_eprq_rqbuf)); wb_addr = dma_addr + (sq_depth << sqe_size_log2); cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPSQ, sizeof(cmd.req)); cmd.req.u.create = FUN_ADMIN_EPSQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags, sqid, cqid, sqe_size_log2, sq_depth - 1, dma_addr, 0, coal_nentries, coal_usec, irq_num, scan_start_id, scan_end_id, 0, rq_buf_size_log2, ilog2(sizeof(u64)), wb_addr); rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp, sizeof(cmd.rsp), 0); if (rc) return rc; hw_qid = be32_to_cpu(cmd.rsp.id); *dbp = fun_sq_db_addr(fdev, hw_qid); if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR) *sqidp = hw_qid; return rc; } EXPORT_SYMBOL_GPL(fun_sq_create); /* Prepare and issue an admin command to create a CQ on the device with the * provided parameters. If the queue ID is auto-allocated by the device it is * returned in *cqidp. 
*/ int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid, u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr, u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec, u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 *cqidp, u32 __iomem **dbp) { union { struct fun_admin_epcq_req req; struct fun_admin_generic_create_rsp rsp; } cmd; u32 hw_qid; int rc; if (cq_depth > fdev->q_depth) return -EINVAL; cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ, sizeof(cmd.req)); cmd.req.u.create = FUN_ADMIN_EPCQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags, cqid, rqid, cqe_size_log2, cq_depth - 1, dma_addr, tailroom, headroom / 2, 0, coal_nentries, coal_usec, irq_num, scan_start_id, scan_end_id, 0); rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp, sizeof(cmd.rsp), 0); if (rc) return rc; hw_qid = be32_to_cpu(cmd.rsp.id); *dbp = fun_cq_db_addr(fdev, hw_qid); if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR) *cqidp = hw_qid; return rc; } EXPORT_SYMBOL_GPL(fun_cq_create); static bool fun_sq_is_head_wb(const struct fun_queue *funq) { return funq->sq_flags & FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS; } static void fun_clean_rq(struct fun_queue *funq) { struct fun_dev *fdev = funq->fdev; struct fun_rq_info *rqinfo; unsigned int i; for (i = 0; i < funq->rq_depth; i++) { rqinfo = &funq->rq_info[i]; if (rqinfo->page) { dma_unmap_page(fdev->dev, rqinfo->dma, PAGE_SIZE, DMA_FROM_DEVICE); put_page(rqinfo->page); rqinfo->page = NULL; } } } static int fun_fill_rq(struct fun_queue *funq) { struct device *dev = funq->fdev->dev; int i, node = dev_to_node(dev); struct fun_rq_info *rqinfo; for (i = 0; i < funq->rq_depth; i++) { rqinfo = &funq->rq_info[i]; rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0); if (unlikely(!rqinfo->page)) return -ENOMEM; rqinfo->dma = dma_map_page(dev, rqinfo->page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, rqinfo->dma))) { put_page(rqinfo->page); rqinfo->page = NULL; return -ENOMEM; } funq->rqes[i] = FUN_EPRQ_RQBUF_INIT(rqinfo->dma); } funq->rq_tail = funq->rq_depth - 1; return 0; } static void fun_rq_update_pos(struct fun_queue *funq, int buf_offset) { if (buf_offset <= funq->rq_buf_offset) { struct fun_rq_info *rqinfo = &funq->rq_info[funq->rq_buf_idx]; struct device *dev = funq->fdev->dev; dma_sync_single_for_device(dev, rqinfo->dma, PAGE_SIZE, DMA_FROM_DEVICE); funq->num_rqe_to_fill++; if (++funq->rq_buf_idx == funq->rq_depth) funq->rq_buf_idx = 0; } funq->rq_buf_offset = buf_offset; } /* Given a command response with data scattered across >= 1 RQ buffers return * a pointer to a contiguous buffer containing all the data. If the data is in * one RQ buffer the start address within that buffer is returned, otherwise a * new buffer is allocated and the data is gathered into it. 
*/ static void *fun_data_from_rq(struct fun_queue *funq, const struct fun_rsp_common *rsp, bool *need_free) { u32 bufoff, total_len, remaining, fragsize, dataoff; struct device *dma_dev = funq->fdev->dev; const struct fun_dataop_rqbuf *databuf; const struct fun_dataop_hdr *dataop; const struct fun_rq_info *rqinfo; void *data; dataop = (void *)rsp + rsp->suboff8 * 8; total_len = be32_to_cpu(dataop->total_len); if (likely(dataop->nsgl == 1)) { databuf = (struct fun_dataop_rqbuf *)dataop->imm; bufoff = be32_to_cpu(databuf->bufoff); fun_rq_update_pos(funq, bufoff); rqinfo = &funq->rq_info[funq->rq_buf_idx]; dma_sync_single_for_cpu(dma_dev, rqinfo->dma + bufoff, total_len, DMA_FROM_DEVICE); *need_free = false; return page_address(rqinfo->page) + bufoff; } /* For scattered completions gather the fragments into one buffer. */ data = kmalloc(total_len, GFP_ATOMIC); /* NULL is OK here. In case of failure we still need to consume the data * for proper buffer accounting but indicate an error in the response. */ if (likely(data)) *need_free = true; dataoff = 0; for (remaining = total_len; remaining; remaining -= fragsize) { fun_rq_update_pos(funq, 0); fragsize = min_t(unsigned int, PAGE_SIZE, remaining); if (data) { rqinfo = &funq->rq_info[funq->rq_buf_idx]; dma_sync_single_for_cpu(dma_dev, rqinfo->dma, fragsize, DMA_FROM_DEVICE); memcpy(data + dataoff, page_address(rqinfo->page), fragsize); dataoff += fragsize; } } return data; } unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max) { const struct fun_cqe_info *info; struct fun_rsp_common *rsp; unsigned int new_cqes; u16 sf_p, flags; bool need_free; void *cqe; if (!max) max = funq->cq_depth - 1; for (new_cqes = 0; new_cqes < max; new_cqes++) { cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2); info = funq_cqe_info(funq, cqe); sf_p = be16_to_cpu(info->sf_p); if ((sf_p & 1) != funq->cq_phase) break; /* ensure the phase tag is read before other CQE fields */ dma_rmb(); if (++funq->cq_head == funq->cq_depth) { funq->cq_head = 0; funq->cq_phase = !funq->cq_phase; } rsp = cqe; flags = be16_to_cpu(rsp->flags); need_free = false; if (unlikely(flags & FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF)) { rsp = fun_data_from_rq(funq, rsp, &need_free); if (!rsp) { rsp = cqe; rsp->len8 = 1; if (rsp->ret == 0) rsp->ret = ENOMEM; } } if (funq->cq_cb) funq->cq_cb(funq, funq->cb_data, rsp, info); if (need_free) kfree(rsp); } dev_dbg(funq->fdev->dev, "CQ %u, new CQEs %u/%u, head %u, phase %u\n", funq->cqid, new_cqes, max, funq->cq_head, funq->cq_phase); return new_cqes; } unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max) { unsigned int processed; u32 db; processed = __fun_process_cq(funq, max); if (funq->num_rqe_to_fill) { funq->rq_tail = (funq->rq_tail + funq->num_rqe_to_fill) % funq->rq_depth; funq->num_rqe_to_fill = 0; writel(funq->rq_tail, funq->rq_db); } db = funq->cq_head | FUN_DB_IRQ_ARM_F; writel(db, funq->cq_db); return processed; } static int fun_alloc_sqes(struct fun_queue *funq) { funq->sq_cmds = fun_alloc_ring_mem(funq->fdev->dev, funq->sq_depth, 1 << funq->sqe_size_log2, 0, fun_sq_is_head_wb(funq), NUMA_NO_NODE, &funq->sq_dma_addr, NULL, &funq->sq_head); return funq->sq_cmds ? 0 : -ENOMEM; } static int fun_alloc_cqes(struct fun_queue *funq) { funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth, 1 << funq->cqe_size_log2, 0, false, NUMA_NO_NODE, &funq->cq_dma_addr, NULL, NULL); return funq->cqes ? 
0 : -ENOMEM; } static int fun_alloc_rqes(struct fun_queue *funq) { funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth, sizeof(*funq->rqes), sizeof(*funq->rq_info), false, NUMA_NO_NODE, &funq->rq_dma_addr, (void **)&funq->rq_info, NULL); return funq->rqes ? 0 : -ENOMEM; } /* Free a queue's structures. */ void fun_free_queue(struct fun_queue *funq) { struct device *dev = funq->fdev->dev; fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false, funq->cqes, funq->cq_dma_addr, NULL); fun_free_ring_mem(dev, funq->sq_depth, 1 << funq->sqe_size_log2, fun_sq_is_head_wb(funq), funq->sq_cmds, funq->sq_dma_addr, NULL); if (funq->rqes) { fun_clean_rq(funq); fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes), false, funq->rqes, funq->rq_dma_addr, funq->rq_info); } kfree(funq); } /* Allocate and initialize a funq's structures. */ struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid, const struct fun_queue_alloc_req *req) { struct fun_queue *funq = kzalloc(sizeof(*funq), GFP_KERNEL); if (!funq) return NULL; funq->fdev = fdev; spin_lock_init(&funq->sq_lock); funq->qid = qid; /* Initial CQ/SQ/RQ ids */ if (req->rq_depth) { funq->cqid = 2 * qid; if (funq->qid) { /* I/O Q: use rqid = cqid, sqid = +1 */ funq->rqid = funq->cqid; funq->sqid = funq->rqid + 1; } else { /* Admin Q: sqid is always 0, use ID 1 for RQ */ funq->sqid = 0; funq->rqid = 1; } } else { funq->cqid = qid; funq->sqid = qid; } funq->cq_flags = req->cq_flags; funq->sq_flags = req->sq_flags; funq->cqe_size_log2 = req->cqe_size_log2; funq->sqe_size_log2 = req->sqe_size_log2; funq->cq_depth = req->cq_depth; funq->sq_depth = req->sq_depth; funq->cq_intcoal_nentries = req->cq_intcoal_nentries; funq->cq_intcoal_usec = req->cq_intcoal_usec; funq->sq_intcoal_nentries = req->sq_intcoal_nentries; funq->sq_intcoal_usec = req->sq_intcoal_usec; if (fun_alloc_cqes(funq)) goto free_funq; funq->cq_phase = 1; if (fun_alloc_sqes(funq)) goto free_funq; if (req->rq_depth) { funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ; funq->rq_depth = req->rq_depth; funq->rq_buf_offset = -1; if (fun_alloc_rqes(funq) || fun_fill_rq(funq)) goto free_funq; } funq->cq_vector = -1; funq->cqe_info_offset = (1 << funq->cqe_size_log2) - sizeof(struct fun_cqe_info); /* SQ/CQ 0 are implicitly created, assign their doorbells now. * Other queues are assigned doorbells at their explicit creation. */ if (funq->sqid == 0) funq->sq_db = fun_sq_db_addr(fdev, 0); if (funq->cqid == 0) funq->cq_db = fun_cq_db_addr(fdev, 0); return funq; free_funq: fun_free_queue(funq); return NULL; } /* Create a funq's CQ on the device. */ static int fun_create_cq(struct fun_queue *funq) { struct fun_dev *fdev = funq->fdev; unsigned int rqid; int rc; rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ? funq->rqid : FUN_HCI_ID_INVALID; rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid, funq->cqe_size_log2, funq->cq_depth, funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries, funq->cq_intcoal_usec, funq->cq_vector, 0, 0, &funq->cqid, &funq->cq_db); if (!rc) dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid); return rc; } /* Create a funq's SQ on the device. 
*/ static int fun_create_sq(struct fun_queue *funq) { struct fun_dev *fdev = funq->fdev; int rc; rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid, funq->sqe_size_log2, funq->sq_depth, funq->sq_dma_addr, funq->sq_intcoal_nentries, funq->sq_intcoal_usec, funq->cq_vector, 0, 0, 0, &funq->sqid, &funq->sq_db); if (!rc) dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid); return rc; } /* Create a funq's RQ on the device. */ int fun_create_rq(struct fun_queue *funq) { struct fun_dev *fdev = funq->fdev; int rc; rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0, funq->rq_depth, funq->rq_dma_addr, 0, 0, funq->cq_vector, 0, 0, PAGE_SHIFT, &funq->rqid, &funq->rq_db); if (!rc) dev_dbg(fdev->dev, "created RQ %u\n", funq->rqid); return rc; } static unsigned int funq_irq(struct fun_queue *funq) { return pci_irq_vector(to_pci_dev(funq->fdev->dev), funq->cq_vector); } int fun_request_irq(struct fun_queue *funq, const char *devname, irq_handler_t handler, void *data) { int rc; if (funq->cq_vector < 0) return -EINVAL; funq->irq_handler = handler; funq->irq_data = data; snprintf(funq->irqname, sizeof(funq->irqname), funq->qid ? "%s-q[%d]" : "%s-adminq", devname, funq->qid); rc = request_irq(funq_irq(funq), handler, 0, funq->irqname, data); if (rc) funq->irq_handler = NULL; return rc; } /* Create all component queues of a funq on the device. */ int fun_create_queue(struct fun_queue *funq) { int rc; rc = fun_create_cq(funq); if (rc) return rc; if (funq->rq_depth) { rc = fun_create_rq(funq); if (rc) goto release_cq; } rc = fun_create_sq(funq); if (rc) goto release_rq; return 0; release_rq: fun_destroy_sq(funq->fdev, funq->rqid); release_cq: fun_destroy_cq(funq->fdev, funq->cqid); return rc; } void fun_free_irq(struct fun_queue *funq) { if (funq->irq_handler) { unsigned int vector = funq_irq(funq); free_irq(vector, funq->irq_data); funq->irq_handler = NULL; funq->irq_data = NULL; } }
linux-master
drivers/net/ethernet/fungible/funcore/fun_queue.c
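Editorial aside, not part of fun_queue.c: __fun_process_cq() above decides whether a CQE is new by comparing the entry's phase bit against funq->cq_phase and toggles that expectation each time cq_head wraps, so the consumer never needs a producer index shared with the device. The single-threaded toy below reduces that to its core; toy_cq, cq_push() and cq_pop() are made-up names, and the dma_rmb() the real consumer needs between reading the phase and the rest of the entry is deliberately left out.

/*
 * Editorial sketch: a phase-bit ring consumer in the style of
 * __fun_process_cq(), shrunk to a single-threaded toy.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_DEPTH 4

struct toy_cqe {
    uint32_t data;
    uint8_t phase;      /* written last by the producer */
};

struct toy_cq {
    struct toy_cqe ring[CQ_DEPTH];
    unsigned int prod_head, cons_head;
    uint8_t prod_phase, cons_phase;
};

static void cq_init(struct toy_cq *cq)
{
    /* Zeroed entries have phase 0, so a fresh queue starts expecting 1. */
    *cq = (struct toy_cq){ .prod_phase = 1, .cons_phase = 1 };
}

static void cq_push(struct toy_cq *cq, uint32_t data)
{
    struct toy_cqe *e = &cq->ring[cq->prod_head];

    e->data = data;
    e->phase = cq->prod_phase;  /* makes the entry visible */
    if (++cq->prod_head == CQ_DEPTH) {
        cq->prod_head = 0;
        cq->prod_phase ^= 1;
    }
}

static bool cq_pop(struct toy_cq *cq, uint32_t *data)
{
    struct toy_cqe *e = &cq->ring[cq->cons_head];

    if (e->phase != cq->cons_phase)
        return false;           /* not produced yet */
    *data = e->data;
    if (++cq->cons_head == CQ_DEPTH) {
        cq->cons_head = 0;
        cq->cons_phase ^= 1;    /* mirror of the funq->cq_phase toggle */
    }
    return true;
}

int main(void)
{
    struct toy_cq cq;
    uint32_t v;

    cq_init(&cq);
    for (uint32_t i = 0; i < 3; i++)
        cq_push(&cq, i);
    while (cq_pop(&cq, &v))
        printf("consumed %u\n", v);
    for (uint32_t i = 3; i < 6; i++)    /* wraps the ring, flips phase */
        cq_push(&cq, i);
    while (cq_pop(&cq, &v))
        printf("consumed %u\n", v);
    return 0;
}

Running it prints the six values in order even though the second batch of pushes wraps the ring and flips the producer's phase, which is the property the hardware completion queue relies on.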
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) #include <linux/bpf.h> #include <linux/crash_dump.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/filter.h> #include <linux/idr.h> #include <linux/if_vlan.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/rtnetlink.h> #include <linux/inetdevice.h> #include "funeth.h" #include "funeth_devlink.h" #include "funeth_ktls.h" #include "fun_port.h" #include "fun_queue.h" #include "funeth_txrx.h" #define ADMIN_SQ_DEPTH 32 #define ADMIN_CQ_DEPTH 64 #define ADMIN_RQ_DEPTH 16 /* Default number of Tx/Rx queues. */ #define FUN_DFLT_QUEUES 16U enum { FUN_SERV_RES_CHANGE = FUN_SERV_FIRST_AVAIL, FUN_SERV_DEL_PORTS, }; static const struct pci_device_id funeth_id_table[] = { { PCI_VDEVICE(FUNGIBLE, 0x0101) }, { PCI_VDEVICE(FUNGIBLE, 0x0181) }, { 0, } }; /* Issue a port write admin command with @n key/value pairs. */ static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n, const int *keys, const u64 *data) { unsigned int cmd_size, i; union { struct fun_admin_port_req req; struct fun_admin_port_rsp rsp; u8 v[ADMIN_SQE_SIZE]; } cmd; cmd_size = offsetof(struct fun_admin_port_req, u.write.write48) + n * sizeof(struct fun_admin_write48_req); if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN) return -EINVAL; cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, cmd_size); cmd.req.u.write = FUN_ADMIN_PORT_WRITE_REQ_INIT(FUN_ADMIN_SUBOP_WRITE, 0, fp->netdev->dev_port); for (i = 0; i < n; i++) cmd.req.u.write.write48[i] = FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]); return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, cmd_size, 0); } int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data) { return fun_port_write_cmds(fp, 1, &key, &data); } /* Issue a port read admin command with @n key/value pairs. 
*/ static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n, const int *keys, u64 *data) { const struct fun_admin_read48_rsp *r48rsp; unsigned int cmd_size, i; int rc; union { struct fun_admin_port_req req; struct fun_admin_port_rsp rsp; u8 v[ADMIN_SQE_SIZE]; } cmd; cmd_size = offsetof(struct fun_admin_port_req, u.read.read48) + n * sizeof(struct fun_admin_read48_req); if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN) return -EINVAL; cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, cmd_size); cmd.req.u.read = FUN_ADMIN_PORT_READ_REQ_INIT(FUN_ADMIN_SUBOP_READ, 0, fp->netdev->dev_port); for (i = 0; i < n; i++) cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]); rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, cmd_size, 0); if (rc) return rc; for (r48rsp = cmd.rsp.u.read.read48, i = 0; i < n; i++, r48rsp++) { data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data); dev_dbg(fp->fdev->dev, "port_read_rsp lport=%u (key_to_data=0x%llx) key=%d data:%lld retval:%lld", fp->lport, r48rsp->key_to_data, keys[i], data[i], FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data)); } return 0; } int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data) { return fun_port_read_cmds(fp, 1, &key, data); } static void fun_report_link(struct net_device *netdev) { if (netif_carrier_ok(netdev)) { const struct funeth_priv *fp = netdev_priv(netdev); const char *fec = "", *pause = ""; int speed = fp->link_speed; char unit = 'M'; if (fp->link_speed >= SPEED_1000) { speed /= 1000; unit = 'G'; } if (fp->active_fec & FUN_PORT_FEC_RS) fec = ", RS-FEC"; else if (fp->active_fec & FUN_PORT_FEC_FC) fec = ", BASER-FEC"; if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK) pause = ", Tx/Rx PAUSE"; else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE) pause = ", Rx PAUSE"; else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE) pause = ", Tx PAUSE"; netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n", speed, unit, pause, fec); } else { netdev_info(netdev, "Link down\n"); } } static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr, unsigned int adi_id, const struct fun_adi_param *param) { struct fun_admin_adi_req req = { .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ADI, sizeof(req)), .u.write.subop = FUN_ADMIN_SUBOP_WRITE, .u.write.attribute = attr, .u.write.id = cpu_to_be32(adi_id), .u.write.param = *param }; return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0); } /* Configure RSS for the given port. @op determines whether a new RSS context * is to be created or whether an existing one should be reconfigured. The * remaining parameters specify the hashing algorithm, key, and indirection * table. * * This initiates packet delivery to the Rx queues set in the indirection * table. */ int fun_config_rss(struct net_device *dev, int algo, const u8 *key, const u32 *qtable, u8 op) { struct funeth_priv *fp = netdev_priv(dev); unsigned int table_len = fp->indir_table_nentries; unsigned int len = FUN_ETH_RSS_MAX_KEY_SIZE + sizeof(u32) * table_len; struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs); union { struct { struct fun_admin_rss_req req; struct fun_dataop_gl gl; }; struct fun_admin_generic_create_rsp rsp; } cmd; __be32 *indir_tab; u16 flags; int rc; if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID) return -EINVAL; flags = op == FUN_ADMIN_SUBOP_CREATE ? 
FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR : 0; cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_RSS, sizeof(cmd)); cmd.req.u.create = FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id, dev->dev_port, algo, FUN_ETH_RSS_MAX_KEY_SIZE, table_len, 0, FUN_ETH_RSS_MAX_KEY_SIZE); cmd.req.u.create.dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len); fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr); /* write the key and indirection table into the RSS DMA area */ memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE); indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE; for (rc = 0; rc < table_len; rc++) *indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid); rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, sizeof(cmd.rsp), 0); if (!rc && op == FUN_ADMIN_SUBOP_CREATE) fp->rss_hw_id = be32_to_cpu(cmd.rsp.id); return rc; } /* Destroy the HW RSS conntext associated with the given port. This also stops * all packet delivery to our Rx queues. */ static void fun_destroy_rss(struct funeth_priv *fp) { if (fp->rss_hw_id != FUN_HCI_ID_INVALID) { fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id); fp->rss_hw_id = FUN_HCI_ID_INVALID; } } static void fun_irq_aff_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) { struct fun_irq *p = container_of(notify, struct fun_irq, aff_notify); cpumask_copy(&p->affinity_mask, mask); } static void fun_irq_aff_release(struct kref __always_unused *ref) { } /* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it, * and add it to the IRQ XArray. */ static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx, int node, unsigned int xa_idx_offset) { struct fun_irq *irq; int cpu, res; cpu = cpumask_local_spread(idx, node); node = cpu_to_mem(cpu); irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node); if (!irq) return ERR_PTR(-ENOMEM); res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx); if (res != 1) goto free_irq; res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL); if (res) goto release_irq; irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx); cpumask_set_cpu(cpu, &irq->affinity_mask); irq->aff_notify.notify = fun_irq_aff_notify; irq->aff_notify.release = fun_irq_aff_release; irq->state = FUN_IRQ_INIT; return irq; release_irq: fun_release_irqs(fp->fdev, 1, &irq->irq_idx); free_irq: kfree(irq); return ERR_PTR(res); } static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq) { netif_napi_del(&irq->napi); fun_release_irqs(fp->fdev, 1, &irq->irq_idx); kfree(irq); } /* Release the IRQs reserved for Tx/Rx queues that aren't being used. */ static void fun_prune_queue_irqs(struct net_device *dev) { struct funeth_priv *fp = netdev_priv(dev); unsigned int nreleased = 0; struct fun_irq *irq; unsigned long idx; xa_for_each(&fp->irqs, idx, irq) { if (irq->txq || irq->rxq) /* skip those in use */ continue; xa_erase(&fp->irqs, idx); fun_free_qirq(fp, irq); nreleased++; if (idx < fp->rx_irq_ofst) fp->num_tx_irqs--; else fp->num_rx_irqs--; } netif_info(fp, intr, dev, "Released %u queue IRQs\n", nreleased); } /* Reserve IRQs, one per queue, to acommodate the requested queue numbers @ntx * and @nrx. IRQs are added incrementally to those we already have. * We hold on to allocated IRQs until garbage collection of unused IRQs is * separately requested. 
*/ static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx, unsigned int nrx) { struct funeth_priv *fp = netdev_priv(dev); int node = dev_to_node(&fp->pdev->dev); struct fun_irq *irq; unsigned int i; for (i = fp->num_tx_irqs; i < ntx; i++) { irq = fun_alloc_qirq(fp, i, node, 0); if (IS_ERR(irq)) return PTR_ERR(irq); fp->num_tx_irqs++; netif_napi_add_tx(dev, &irq->napi, fun_txq_napi_poll); } for (i = fp->num_rx_irqs; i < nrx; i++) { irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst); if (IS_ERR(irq)) return PTR_ERR(irq); fp->num_rx_irqs++; netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll); } netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n", ntx, nrx); return 0; } static void free_txqs(struct funeth_txq **txqs, unsigned int nqs, unsigned int start, int state) { unsigned int i; for (i = start; i < nqs && txqs[i]; i++) txqs[i] = funeth_txq_free(txqs[i], state); } static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs, unsigned int nqs, unsigned int depth, unsigned int start, int state) { struct funeth_priv *fp = netdev_priv(dev); unsigned int i; int err; for (i = start; i < nqs; i++) { err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i), state, &txqs[i]); if (err) { free_txqs(txqs, nqs, start, FUN_QSTATE_DESTROYED); return err; } } return 0; } static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs, unsigned int start, int state) { unsigned int i; for (i = start; i < nqs && rxqs[i]; i++) rxqs[i] = funeth_rxq_free(rxqs[i], state); } static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs, unsigned int nqs, unsigned int ncqe, unsigned int nrqe, unsigned int start, int state) { struct funeth_priv *fp = netdev_priv(dev); unsigned int i; int err; for (i = start; i < nqs; i++) { err = funeth_rxq_create(dev, i, ncqe, nrqe, xa_load(&fp->irqs, i + fp->rx_irq_ofst), state, &rxqs[i]); if (err) { free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED); return err; } } return 0; } static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs, unsigned int start, int state) { unsigned int i; for (i = start; i < nqs && xdpqs[i]; i++) xdpqs[i] = funeth_txq_free(xdpqs[i], state); if (state == FUN_QSTATE_DESTROYED) kfree(xdpqs); } static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs, unsigned int depth, unsigned int start, int state) { struct funeth_txq **xdpqs; unsigned int i; int err; xdpqs = kcalloc(nqs, sizeof(*xdpqs), GFP_KERNEL); if (!xdpqs) return ERR_PTR(-ENOMEM); for (i = start; i < nqs; i++) { err = funeth_txq_create(dev, i, depth, NULL, state, &xdpqs[i]); if (err) { free_xdpqs(xdpqs, nqs, start, FUN_QSTATE_DESTROYED); return ERR_PTR(err); } } return xdpqs; } static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset) { struct funeth_priv *fp = netdev_priv(netdev); struct funeth_txq **xdpqs = qset->xdpqs; struct funeth_rxq **rxqs = qset->rxqs; /* qset may not specify any queues to operate on. In that case the * currently installed queues are implied. 
*/ if (!rxqs) { rxqs = rtnl_dereference(fp->rxqs); xdpqs = rtnl_dereference(fp->xdpqs); qset->txqs = fp->txqs; qset->nrxqs = netdev->real_num_rx_queues; qset->ntxqs = netdev->real_num_tx_queues; qset->nxdpqs = fp->num_xdpqs; } if (!rxqs) return; if (rxqs == rtnl_dereference(fp->rxqs)) { rcu_assign_pointer(fp->rxqs, NULL); rcu_assign_pointer(fp->xdpqs, NULL); synchronize_net(); fp->txqs = NULL; } free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state); free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state); free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state); if (qset->state == FUN_QSTATE_DESTROYED) kfree(rxqs); /* Tell the caller which queues were operated on. */ qset->rxqs = rxqs; qset->xdpqs = xdpqs; } static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset) { struct funeth_txq **xdpqs = NULL, **txqs; struct funeth_rxq **rxqs; int err; err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs); if (err) return err; rxqs = kcalloc(qset->ntxqs + qset->nrxqs, sizeof(*rxqs), GFP_KERNEL); if (!rxqs) return -ENOMEM; if (qset->nxdpqs) { xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth, qset->xdpq_start, qset->state); if (IS_ERR(xdpqs)) { err = PTR_ERR(xdpqs); goto free_qvec; } } txqs = (struct funeth_txq **)&rxqs[qset->nrxqs]; err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth, qset->txq_start, qset->state); if (err) goto free_xdpqs; err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth, qset->rq_depth, qset->rxq_start, qset->state); if (err) goto free_txqs; qset->rxqs = rxqs; qset->txqs = txqs; qset->xdpqs = xdpqs; return 0; free_txqs: free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED); free_xdpqs: free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED); free_qvec: kfree(rxqs); return err; } /* Take queues to the next level. Presently this means creating them on the * device. 
*/ static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset) { struct funeth_priv *fp = netdev_priv(dev); int i, err; for (i = 0; i < qset->nrxqs; i++) { err = fun_rxq_create_dev(qset->rxqs[i], xa_load(&fp->irqs, i + fp->rx_irq_ofst)); if (err) goto out; } for (i = 0; i < qset->ntxqs; i++) { err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i)); if (err) goto out; } for (i = 0; i < qset->nxdpqs; i++) { err = fun_txq_create_dev(qset->xdpqs[i], NULL); if (err) goto out; } return 0; out: fun_free_rings(dev, qset); return err; } static int fun_port_create(struct net_device *netdev) { struct funeth_priv *fp = netdev_priv(netdev); union { struct fun_admin_port_req req; struct fun_admin_port_rsp rsp; } cmd; int rc; if (fp->lport != INVALID_LPORT) return 0; cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, sizeof(cmd.req)); cmd.req.u.create = FUN_ADMIN_PORT_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0, netdev->dev_port); rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, sizeof(cmd.rsp), 0); if (!rc) fp->lport = be16_to_cpu(cmd.rsp.u.create.lport); return rc; } static int fun_port_destroy(struct net_device *netdev) { struct funeth_priv *fp = netdev_priv(netdev); if (fp->lport == INVALID_LPORT) return 0; fp->lport = INVALID_LPORT; return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0, netdev->dev_port); } static int fun_eth_create(struct funeth_priv *fp) { union { struct fun_admin_eth_req req; struct fun_admin_generic_create_rsp rsp; } cmd; int rc; cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ETH, sizeof(cmd.req)); cmd.req.u.create = FUN_ADMIN_ETH_CREATE_REQ_INIT( FUN_ADMIN_SUBOP_CREATE, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0, fp->netdev->dev_port); rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, sizeof(cmd.rsp), 0); return rc ? rc : be32_to_cpu(cmd.rsp.id); } static int fun_vi_create(struct funeth_priv *fp) { struct fun_admin_vi_req req = { .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_VI, sizeof(req)), .u.create = FUN_ADMIN_VI_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0, fp->netdev->dev_port, fp->netdev->dev_port) }; return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); } /* Helper to create an ETH flow and bind an SQ to it. * Returns the ETH id (>= 0) on success or a negative error. 
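 * The ETH resource is destroyed again if the bind fails.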
*/ int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid) { int rc, ethid; ethid = fun_eth_create(fp); if (ethid >= 0) { rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid, FUN_ADMIN_BIND_TYPE_ETH, ethid); if (rc) { fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid); ethid = rc; } } return ethid; } static irqreturn_t fun_queue_irq_handler(int irq, void *data) { struct fun_irq *p = data; if (p->rxq) { prefetch(p->rxq->next_cqe_info); p->rxq->irq_cnt++; } napi_schedule_irqoff(&p->napi); return IRQ_HANDLED; } static int fun_enable_irqs(struct net_device *dev) { struct funeth_priv *fp = netdev_priv(dev); unsigned long idx, last; unsigned int qidx; struct fun_irq *p; const char *qtype; int err; xa_for_each(&fp->irqs, idx, p) { if (p->txq) { qtype = "tx"; qidx = p->txq->qidx; } else if (p->rxq) { qtype = "rx"; qidx = p->rxq->qidx; } else { continue; } if (p->state != FUN_IRQ_INIT) continue; snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name, qtype, qidx); err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p); if (err) { netdev_err(dev, "Failed to allocate IRQ %u, err %d\n", p->irq, err); goto unroll; } p->state = FUN_IRQ_REQUESTED; } xa_for_each(&fp->irqs, idx, p) { if (p->state != FUN_IRQ_REQUESTED) continue; irq_set_affinity_notifier(p->irq, &p->aff_notify); irq_set_affinity_and_hint(p->irq, &p->affinity_mask); napi_enable(&p->napi); p->state = FUN_IRQ_ENABLED; } return 0; unroll: last = idx - 1; xa_for_each_range(&fp->irqs, idx, p, 0, last) if (p->state == FUN_IRQ_REQUESTED) { free_irq(p->irq, p); p->state = FUN_IRQ_INIT; } return err; } static void fun_disable_one_irq(struct fun_irq *irq) { napi_disable(&irq->napi); irq_set_affinity_notifier(irq->irq, NULL); irq_update_affinity_hint(irq->irq, NULL); free_irq(irq->irq, irq); irq->state = FUN_IRQ_INIT; } static void fun_disable_irqs(struct net_device *dev) { struct funeth_priv *fp = netdev_priv(dev); struct fun_irq *p; unsigned long idx; xa_for_each(&fp->irqs, idx, p) if (p->state == FUN_IRQ_ENABLED) fun_disable_one_irq(p); } static void fun_down(struct net_device *dev, struct fun_qset *qset) { struct funeth_priv *fp = netdev_priv(dev); /* If we don't have queues the data path is already down. * Note netif_running(dev) may be true. */ if (!rcu_access_pointer(fp->rxqs)) return; /* It is also down if the queues aren't on the device. 
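 * In that case only the host-side rings need to be released.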
*/ if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) { netif_info(fp, ifdown, dev, "Tearing down data path on device\n"); fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_DISABLE, 0); netif_carrier_off(dev); netif_tx_disable(dev); fun_destroy_rss(fp); fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port); fun_disable_irqs(dev); } fun_free_rings(dev, qset); } static int fun_up(struct net_device *dev, struct fun_qset *qset) { static const int port_keys[] = { FUN_ADMIN_PORT_KEY_STATS_DMA_LOW, FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH, FUN_ADMIN_PORT_KEY_ENABLE }; struct funeth_priv *fp = netdev_priv(dev); u64 vals[] = { lower_32_bits(fp->stats_dma_addr), upper_32_bits(fp->stats_dma_addr), FUN_PORT_FLAG_ENABLE_NOTIFY }; int err; netif_info(fp, ifup, dev, "Setting up data path on device\n"); if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) { err = fun_advance_ring_state(dev, qset); if (err) return err; } err = fun_vi_create(fp); if (err) goto free_queues; fp->txqs = qset->txqs; rcu_assign_pointer(fp->rxqs, qset->rxqs); rcu_assign_pointer(fp->xdpqs, qset->xdpqs); err = fun_enable_irqs(dev); if (err) goto destroy_vi; if (fp->rss_cfg) { err = fun_config_rss(dev, fp->hash_algo, fp->rss_key, fp->indir_table, FUN_ADMIN_SUBOP_CREATE); } else { /* The non-RSS case has only 1 queue. */ err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port, FUN_ADMIN_BIND_TYPE_EPCQ, qset->rxqs[0]->hw_cqid); } if (err) goto disable_irqs; err = fun_port_write_cmds(fp, 3, port_keys, vals); if (err) goto free_rss; netif_tx_start_all_queues(dev); return 0; free_rss: fun_destroy_rss(fp); disable_irqs: fun_disable_irqs(dev); destroy_vi: fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port); free_queues: fun_free_rings(dev, qset); return err; } static int funeth_open(struct net_device *netdev) { struct funeth_priv *fp = netdev_priv(netdev); struct fun_qset qset = { .nrxqs = netdev->real_num_rx_queues, .ntxqs = netdev->real_num_tx_queues, .nxdpqs = fp->num_xdpqs, .cq_depth = fp->cq_depth, .rq_depth = fp->rq_depth, .sq_depth = fp->sq_depth, .state = FUN_QSTATE_INIT_FULL, }; int rc; rc = fun_alloc_rings(netdev, &qset); if (rc) return rc; rc = fun_up(netdev, &qset); if (rc) { qset.state = FUN_QSTATE_DESTROYED; fun_free_rings(netdev, &qset); } return rc; } static int funeth_close(struct net_device *netdev) { struct fun_qset qset = { .state = FUN_QSTATE_DESTROYED }; fun_down(netdev, &qset); return 0; } static void fun_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct funeth_priv *fp = netdev_priv(netdev); struct funeth_txq **xdpqs; struct funeth_rxq **rxqs; unsigned int i, start; stats->tx_packets = fp->tx_packets; stats->tx_bytes = fp->tx_bytes; stats->tx_dropped = fp->tx_dropped; stats->rx_packets = fp->rx_packets; stats->rx_bytes = fp->rx_bytes; stats->rx_dropped = fp->rx_dropped; rcu_read_lock(); rxqs = rcu_dereference(fp->rxqs); if (!rxqs) goto unlock; for (i = 0; i < netdev->real_num_tx_queues; i++) { struct funeth_txq_stats txs; FUN_QSTAT_READ(fp->txqs[i], start, txs); stats->tx_packets += txs.tx_pkts; stats->tx_bytes += txs.tx_bytes; stats->tx_dropped += txs.tx_map_err; } for (i = 0; i < netdev->real_num_rx_queues; i++) { struct funeth_rxq_stats rxs; FUN_QSTAT_READ(rxqs[i], start, rxs); stats->rx_packets += rxs.rx_pkts; stats->rx_bytes += rxs.rx_bytes; stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops; } xdpqs = rcu_dereference(fp->xdpqs); if (!xdpqs) goto unlock; for (i = 0; i < fp->num_xdpqs; i++) { struct funeth_txq_stats txs; FUN_QSTAT_READ(xdpqs[i], start, txs); 
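/* Fold XDP Tx traffic into the interface Tx counters. */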
stats->tx_packets += txs.tx_pkts; stats->tx_bytes += txs.tx_bytes; } unlock: rcu_read_unlock(); } static int fun_change_mtu(struct net_device *netdev, int new_mtu) { struct funeth_priv *fp = netdev_priv(netdev); int rc; rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu); if (!rc) netdev->mtu = new_mtu; return rc; } static int fun_set_macaddr(struct net_device *netdev, void *addr) { struct funeth_priv *fp = netdev_priv(netdev); struct sockaddr *saddr = addr; int rc; if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) return 0; rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR, ether_addr_to_u64(saddr->sa_data)); if (!rc) eth_hw_addr_set(netdev, saddr->sa_data); return rc; } static int fun_get_port_attributes(struct net_device *netdev) { static const int keys[] = { FUN_ADMIN_PORT_KEY_MACADDR, FUN_ADMIN_PORT_KEY_CAPABILITIES, FUN_ADMIN_PORT_KEY_ADVERT, FUN_ADMIN_PORT_KEY_MTU }; static const int phys_keys[] = { FUN_ADMIN_PORT_KEY_LANE_ATTRS, }; struct funeth_priv *fp = netdev_priv(netdev); u64 data[ARRAY_SIZE(keys)]; u8 mac[ETH_ALEN]; int i, rc; rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data); if (rc) return rc; for (i = 0; i < ARRAY_SIZE(keys); i++) { switch (keys[i]) { case FUN_ADMIN_PORT_KEY_MACADDR: u64_to_ether_addr(data[i], mac); if (is_zero_ether_addr(mac)) { eth_hw_addr_random(netdev); } else if (is_valid_ether_addr(mac)) { eth_hw_addr_set(netdev, mac); } else { netdev_err(netdev, "device provided a bad MAC address %pM\n", mac); return -EINVAL; } break; case FUN_ADMIN_PORT_KEY_CAPABILITIES: fp->port_caps = data[i]; break; case FUN_ADMIN_PORT_KEY_ADVERT: fp->advertising = data[i]; break; case FUN_ADMIN_PORT_KEY_MTU: netdev->mtu = data[i]; break; } } if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) { rc = fun_port_read_cmds(fp, ARRAY_SIZE(phys_keys), phys_keys, data); if (rc) return rc; fp->lane_attrs = data[0]; } if (netdev->addr_assign_type == NET_ADDR_RANDOM) return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR, ether_addr_to_u64(netdev->dev_addr)); return 0; } static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) { const struct funeth_priv *fp = netdev_priv(dev); return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg, sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0; } static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct funeth_priv *fp = netdev_priv(dev); struct hwtstamp_config cfg; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; /* no TX HW timestamps */ cfg.tx_type = HWTSTAMP_TX_OFF; switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: cfg.rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } fp->hwtstamp_cfg = cfg; return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; } static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCSHWTSTAMP: return fun_hwtstamp_set(dev, ifr); case SIOCGHWTSTAMP: return fun_hwtstamp_get(dev, ifr); default: return -EOPNOTSUPP; } } /* Prepare the queues for XDP. */ static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog) { struct funeth_priv *fp = netdev_priv(dev); unsigned int i, nqs = num_online_cpus(); struct funeth_txq **xdpqs; struct funeth_rxq **rxqs; int err; xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL); if (IS_ERR(xdpqs)) return PTR_ERR(xdpqs); rxqs = rtnl_dereference(fp->rxqs); for (i = 0; i < dev->real_num_rx_queues; i++) { err = fun_rxq_set_bpf(rxqs[i], prog); if (err) goto out; } fp->num_xdpqs = nqs; rcu_assign_pointer(fp->xdpqs, xdpqs); return 0; out: while (i--) fun_rxq_set_bpf(rxqs[i], NULL); free_xdpqs(xdpqs, nqs, 0, FUN_QSTATE_DESTROYED); return err; } /* Set the queues for non-XDP operation. */ static void fun_end_xdp(struct net_device *dev) { struct funeth_priv *fp = netdev_priv(dev); struct funeth_txq **xdpqs; struct funeth_rxq **rxqs; unsigned int i; xdpqs = rtnl_dereference(fp->xdpqs); rcu_assign_pointer(fp->xdpqs, NULL); synchronize_net(); /* at this point both Rx and Tx XDP processing has ended */ free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED); fp->num_xdpqs = 0; rxqs = rtnl_dereference(fp->rxqs); for (i = 0; i < dev->real_num_rx_queues; i++) fun_rxq_set_bpf(rxqs[i], NULL); } #define XDP_MAX_MTU \ (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM) static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp) { struct bpf_prog *old_prog, *prog = xdp->prog; struct funeth_priv *fp = netdev_priv(dev); int i, err; /* XDP uses at most one buffer */ if (prog && dev->mtu > XDP_MAX_MTU) { netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu); NL_SET_ERR_MSG_MOD(xdp->extack, "Device MTU too large for XDP"); return -EINVAL; } if (!netif_running(dev)) { fp->num_xdpqs = prog ? num_online_cpus() : 0; } else if (prog && !fp->xdp_prog) { err = fun_enter_xdp(dev, prog); if (err) { NL_SET_ERR_MSG_MOD(xdp->extack, "Failed to set queues for XDP."); return err; } } else if (!prog && fp->xdp_prog) { fun_end_xdp(dev); } else { struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs); for (i = 0; i < dev->real_num_rx_queues; i++) WRITE_ONCE(rxqs[i]->xdp_prog, prog); } if (prog) xdp_features_set_redirect_target(dev, true); else xdp_features_clear_redirect_target(dev); dev->max_mtu = prog ? 
XDP_MAX_MTU : FUN_MAX_MTU; old_prog = xchg(&fp->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); return 0; } static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return fun_xdp_setup(dev, xdp); default: return -EINVAL; } } static int fun_init_vports(struct fun_ethdev *ed, unsigned int n) { if (ed->num_vports) return -EINVAL; ed->vport_info = kvcalloc(n, sizeof(*ed->vport_info), GFP_KERNEL); if (!ed->vport_info) return -ENOMEM; ed->num_vports = n; return 0; } static void fun_free_vports(struct fun_ethdev *ed) { kvfree(ed->vport_info); ed->vport_info = NULL; ed->num_vports = 0; } static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed, unsigned int vport) { if (!ed->vport_info || vport >= ed->num_vports) return NULL; return ed->vport_info + vport; } static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct funeth_priv *fp = netdev_priv(dev); struct fun_adi_param mac_param = {}; struct fun_dev *fdev = fp->fdev; struct fun_ethdev *ed = to_fun_ethdev(fdev); struct fun_vport_info *vi; int rc = -EINVAL; if (is_multicast_ether_addr(mac)) return -EINVAL; mutex_lock(&ed->state_mutex); vi = fun_get_vport(ed, vf); if (!vi) goto unlock; mac_param.u.mac = FUN_ADI_MAC_INIT(ether_addr_to_u64(mac)); rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_MACADDR, vf + 1, &mac_param); if (!rc) ether_addr_copy(vi->mac, mac); unlock: mutex_unlock(&ed->state_mutex); return rc; } static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct funeth_priv *fp = netdev_priv(dev); struct fun_adi_param vlan_param = {}; struct fun_dev *fdev = fp->fdev; struct fun_ethdev *ed = to_fun_ethdev(fdev); struct fun_vport_info *vi; int rc = -EINVAL; if (vlan > 4095 || qos > 7) return -EINVAL; if (vlan_proto && vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD)) return -EINVAL; mutex_lock(&ed->state_mutex); vi = fun_get_vport(ed, vf); if (!vi) goto unlock; vlan_param.u.vlan = FUN_ADI_VLAN_INIT(be16_to_cpu(vlan_proto), ((u16)qos << VLAN_PRIO_SHIFT) | vlan); rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_VLAN, vf + 1, &vlan_param); if (!rc) { vi->vlan = vlan; vi->qos = qos; vi->vlan_proto = vlan_proto; } unlock: mutex_unlock(&ed->state_mutex); return rc; } static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate) { struct funeth_priv *fp = netdev_priv(dev); struct fun_adi_param rate_param = {}; struct fun_dev *fdev = fp->fdev; struct fun_ethdev *ed = to_fun_ethdev(fdev); struct fun_vport_info *vi; int rc = -EINVAL; if (min_tx_rate) return -EINVAL; mutex_lock(&ed->state_mutex); vi = fun_get_vport(ed, vf); if (!vi) goto unlock; rate_param.u.rate = FUN_ADI_RATE_INIT(max_tx_rate); rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_RATE, vf + 1, &rate_param); if (!rc) vi->max_rate = max_tx_rate; unlock: mutex_unlock(&ed->state_mutex); return rc; } static int fun_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi) { struct funeth_priv *fp = netdev_priv(dev); struct fun_ethdev *ed = to_fun_ethdev(fp->fdev); const struct fun_vport_info *vi; mutex_lock(&ed->state_mutex); vi = fun_get_vport(ed, vf); if (!vi) goto unlock; memset(ivi, 0, sizeof(*ivi)); ivi->vf = vf; ether_addr_copy(ivi->mac, vi->mac); ivi->vlan = vi->vlan; ivi->qos = vi->qos; ivi->vlan_proto = vi->vlan_proto; ivi->max_tx_rate = vi->max_rate; ivi->spoofchk = vi->spoofchk; unlock: mutex_unlock(&ed->state_mutex); return vi ? 
0 : -EINVAL; } static void fun_uninit(struct net_device *dev) { struct funeth_priv *fp = netdev_priv(dev); fun_prune_queue_irqs(dev); xa_destroy(&fp->irqs); } static const struct net_device_ops fun_netdev_ops = { .ndo_open = funeth_open, .ndo_stop = funeth_close, .ndo_start_xmit = fun_start_xmit, .ndo_get_stats64 = fun_get_stats64, .ndo_change_mtu = fun_change_mtu, .ndo_set_mac_address = fun_set_macaddr, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = fun_ioctl, .ndo_uninit = fun_uninit, .ndo_bpf = fun_xdp, .ndo_xdp_xmit = fun_xdp_xmit_frames, .ndo_set_vf_mac = fun_set_vf_mac, .ndo_set_vf_vlan = fun_set_vf_vlan, .ndo_set_vf_rate = fun_set_vf_rate, .ndo_get_vf_config = fun_get_vf_config, }; #define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \ NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \ NETIF_F_GSO_UDP_TUNNEL_CSUM) #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \ NETIF_F_GSO_UDP_L4) #define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \ GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA) static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx) { unsigned int i; for (i = 0; i < fp->indir_table_nentries; i++) fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx); } /* Reset the RSS indirection table to equal distribution across the current * number of Rx queues. Called at init time and whenever the number of Rx * queues changes subsequently. Note that this may also resize the indirection * table. */ static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx) { struct funeth_priv *fp = netdev_priv(dev); if (!fp->rss_cfg) return; /* Set the table size to the max possible that allows an equal number * of occurrences of each CQ. */ fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx); fun_dflt_rss_indir(fp, nrx); } /* Update the RSS LUT to contain only queues in [0, nrx). Normally this will * update the LUT to an equal distribution among nrx queues, If @only_if_needed * is set the LUT is left unchanged if it already does not reference any queues * >= nrx. */ static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx, bool only_if_needed) { struct funeth_priv *fp = netdev_priv(dev); u32 old_lut[FUN_ETH_RSS_MAX_INDIR_ENT]; unsigned int i, oldsz; int err; if (!fp->rss_cfg) return 0; if (only_if_needed) { for (i = 0; i < fp->indir_table_nentries; i++) if (fp->indir_table[i] >= nrx) break; if (i >= fp->indir_table_nentries) return 0; } memcpy(old_lut, fp->indir_table, sizeof(old_lut)); oldsz = fp->indir_table_nentries; fun_reset_rss_indir(dev, nrx); err = fun_config_rss(dev, fp->hash_algo, fp->rss_key, fp->indir_table, FUN_ADMIN_SUBOP_MODIFY); if (!err) return 0; memcpy(fp->indir_table, old_lut, sizeof(old_lut)); fp->indir_table_nentries = oldsz; return err; } /* Allocate the DMA area for the RSS configuration commands to the device, and * initialize the hash, hash key, indirection table size and its entries to * their defaults. The indirection table defaults to equal distribution across * the Rx queues. 
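 * Ports without the offloads capability leave RSS unconfigured.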
*/ static int fun_init_rss(struct net_device *dev) { struct funeth_priv *fp = netdev_priv(dev); size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table); fp->rss_hw_id = FUN_HCI_ID_INVALID; if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS)) return 0; fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size, &fp->rss_dma_addr, GFP_KERNEL); if (!fp->rss_cfg) return -ENOMEM; fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ; netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key)); fun_reset_rss_indir(dev, dev->real_num_rx_queues); return 0; } static void fun_free_rss(struct funeth_priv *fp) { if (fp->rss_cfg) { dma_free_coherent(&fp->pdev->dev, sizeof(fp->rss_key) + sizeof(fp->indir_table), fp->rss_cfg, fp->rss_dma_addr); fp->rss_cfg = NULL; } } void fun_set_ring_count(struct net_device *netdev, unsigned int ntx, unsigned int nrx) { netif_set_real_num_tx_queues(netdev, ntx); if (nrx != netdev->real_num_rx_queues) { netif_set_real_num_rx_queues(netdev, nrx); fun_reset_rss_indir(netdev, nrx); } } static int fun_init_stats_area(struct funeth_priv *fp) { unsigned int nstats; if (!(fp->port_caps & FUN_PORT_CAP_STATS)) return 0; nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_STATS_MAX; fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64), &fp->stats_dma_addr, GFP_KERNEL); if (!fp->stats) return -ENOMEM; return 0; } static void fun_free_stats_area(struct funeth_priv *fp) { unsigned int nstats; if (fp->stats) { nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX; dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64), fp->stats, fp->stats_dma_addr); fp->stats = NULL; } } static int fun_dl_port_register(struct net_device *netdev) { struct funeth_priv *fp = netdev_priv(netdev); struct devlink *dl = priv_to_devlink(fp->fdev); struct devlink_port_attrs attrs = {}; unsigned int idx; if (fp->port_caps & FUN_PORT_CAP_VPORT) { attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL; idx = fp->lport; } else { idx = netdev->dev_port; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.lanes = fp->lane_attrs & 7; if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) { attrs.split = 1; attrs.phys.port_number = fp->lport & ~3; attrs.phys.split_subport_number = fp->lport & 3; } else { attrs.phys.port_number = fp->lport; } } devlink_port_attrs_set(&fp->dl_port, &attrs); return devlink_port_register(dl, &fp->dl_port, idx); } /* Determine the max Tx/Rx queues for a port. */ static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx, unsigned int *nrx) { int neth; if (ed->num_ports > 1 || is_kdump_kernel()) { *ntx = 1; *nrx = 1; return 0; } neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH); if (neth < 0) return neth; /* We determine the max number of queues based on the CPU * cores, device interrupts and queues, RSS size, and device Tx flows. * * - At least 1 Rx and 1 Tx queues. * - At most 1 Rx/Tx queue per core. * - Each Rx/Tx queue needs 1 SQ. 
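 * - At most FUN_ETH_RSS_MAX_INDIR_ENT Rx queues so RSS can cover all of them.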
*/ *ntx = min(ed->nsqs_per_port - 1, num_online_cpus()); *nrx = *ntx; if (*ntx > neth) *ntx = neth; if (*nrx > FUN_ETH_RSS_MAX_INDIR_ENT) *nrx = FUN_ETH_RSS_MAX_INDIR_ENT; return 0; } static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs) { unsigned int ntx, nrx; ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES); nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES); if (ntx <= nrx) { ntx = min(ntx, nsqs / 2); nrx = min(nrx, nsqs - ntx); } else { nrx = min(nrx, nsqs / 2); ntx = min(ntx, nsqs - nrx); } netif_set_real_num_tx_queues(dev, ntx); netif_set_real_num_rx_queues(dev, nrx); } /* Replace the existing Rx/Tx/XDP queues with equal number of queues with * different settings, e.g. depth. This is a disruptive replacement that * temporarily shuts down the data path and should be limited to changes that * can't be applied to live queues. The old queues are always discarded. */ int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs, struct netlink_ext_ack *extack) { struct fun_qset oldqs = { .state = FUN_QSTATE_DESTROYED }; struct funeth_priv *fp = netdev_priv(dev); int err; newqs->nrxqs = dev->real_num_rx_queues; newqs->ntxqs = dev->real_num_tx_queues; newqs->nxdpqs = fp->num_xdpqs; newqs->state = FUN_QSTATE_INIT_SW; err = fun_alloc_rings(dev, newqs); if (err) { NL_SET_ERR_MSG_MOD(extack, "Unable to allocate memory for new queues, keeping current settings"); return err; } fun_down(dev, &oldqs); err = fun_up(dev, newqs); if (!err) return 0; /* The new queues couldn't be installed. We do not retry the old queues * as they are the same to the device as the new queues and would * similarly fail. */ newqs->state = FUN_QSTATE_DESTROYED; fun_free_rings(dev, newqs); NL_SET_ERR_MSG_MOD(extack, "Unable to restore the data path with the new queues."); return err; } /* Change the number of Rx/Tx queues of a device while it is up. This is done * by incrementally adding/removing queues to meet the new requirements while * handling ongoing traffic. 
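 * Queues common to the old and new sets are carried over unchanged.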
*/ int fun_change_num_queues(struct net_device *dev, unsigned int ntx, unsigned int nrx) { unsigned int keep_tx = min(dev->real_num_tx_queues, ntx); unsigned int keep_rx = min(dev->real_num_rx_queues, nrx); struct funeth_priv *fp = netdev_priv(dev); struct fun_qset oldqs = { .rxqs = rtnl_dereference(fp->rxqs), .txqs = fp->txqs, .nrxqs = dev->real_num_rx_queues, .ntxqs = dev->real_num_tx_queues, .rxq_start = keep_rx, .txq_start = keep_tx, .state = FUN_QSTATE_DESTROYED }; struct fun_qset newqs = { .nrxqs = nrx, .ntxqs = ntx, .rxq_start = keep_rx, .txq_start = keep_tx, .cq_depth = fp->cq_depth, .rq_depth = fp->rq_depth, .sq_depth = fp->sq_depth, .state = FUN_QSTATE_INIT_FULL }; int i, err; err = fun_alloc_rings(dev, &newqs); if (err) goto free_irqs; err = fun_enable_irqs(dev); /* of any newly added queues */ if (err) goto free_rings; /* copy the queues we are keeping to the new set */ memcpy(newqs.rxqs, oldqs.rxqs, keep_rx * sizeof(*oldqs.rxqs)); memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs)); if (nrx < dev->real_num_rx_queues) { err = fun_rss_set_qnum(dev, nrx, true); if (err) goto disable_tx_irqs; for (i = nrx; i < dev->real_num_rx_queues; i++) fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi, struct fun_irq, napi)); netif_set_real_num_rx_queues(dev, nrx); } if (ntx < dev->real_num_tx_queues) netif_set_real_num_tx_queues(dev, ntx); rcu_assign_pointer(fp->rxqs, newqs.rxqs); fp->txqs = newqs.txqs; synchronize_net(); if (ntx > dev->real_num_tx_queues) netif_set_real_num_tx_queues(dev, ntx); if (nrx > dev->real_num_rx_queues) { netif_set_real_num_rx_queues(dev, nrx); fun_rss_set_qnum(dev, nrx, false); } /* disable interrupts of any excess Tx queues */ for (i = keep_tx; i < oldqs.ntxqs; i++) fun_disable_one_irq(oldqs.txqs[i]->irq); fun_free_rings(dev, &oldqs); fun_prune_queue_irqs(dev); return 0; disable_tx_irqs: for (i = oldqs.ntxqs; i < ntx; i++) fun_disable_one_irq(newqs.txqs[i]->irq); free_rings: newqs.state = FUN_QSTATE_DESTROYED; fun_free_rings(dev, &newqs); free_irqs: fun_prune_queue_irqs(dev); return err; } static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid) { struct fun_dev *fdev = &ed->fdev; struct net_device *netdev; struct funeth_priv *fp; unsigned int ntx, nrx; int rc; rc = fun_max_qs(ed, &ntx, &nrx); if (rc) return rc; netdev = alloc_etherdev_mqs(sizeof(*fp), ntx, nrx); if (!netdev) { rc = -ENOMEM; goto done; } netdev->dev_port = portid; fun_queue_defaults(netdev, ed->nsqs_per_port); fp = netdev_priv(netdev); fp->fdev = fdev; fp->pdev = to_pci_dev(fdev->dev); fp->netdev = netdev; xa_init(&fp->irqs); fp->rx_irq_ofst = ntx; seqcount_init(&fp->link_seq); fp->lport = INVALID_LPORT; rc = fun_port_create(netdev); if (rc) goto free_netdev; /* bind port to admin CQ for async events */ rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_PORT, portid, FUN_ADMIN_BIND_TYPE_EPCQ, 0); if (rc) goto destroy_port; rc = fun_get_port_attributes(netdev); if (rc) goto destroy_port; rc = fun_init_rss(netdev); if (rc) goto destroy_port; rc = fun_init_stats_area(fp); if (rc) goto free_rss; SET_NETDEV_DEV(netdev, fdev->dev); SET_NETDEV_DEVLINK_PORT(netdev, &fp->dl_port); netdev->netdev_ops = &fun_netdev_ops; netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM; if (fp->port_caps & FUN_PORT_CAP_OFFLOADS) netdev->hw_features |= NETIF_F_HW_CSUM | TSO_FLAGS; if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS) netdev->hw_features |= GSO_ENCAP_FLAGS; netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA; netdev->vlan_features = netdev->features & VLAN_FEAT; 
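	/* MPLS traffic reuses the VLAN feature set; encapsulated traffic gets all HW features. */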
netdev->mpls_features = netdev->vlan_features; netdev->hw_enc_features = netdev->hw_features; netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = FUN_MAX_MTU; fun_set_ethtool_ops(netdev); /* configurable parameters */ fp->sq_depth = min(SQ_DEPTH, fdev->q_depth); fp->cq_depth = min(CQ_DEPTH, fdev->q_depth); fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth); fp->rx_coal_usec = CQ_INTCOAL_USEC; fp->rx_coal_count = CQ_INTCOAL_NPKT; fp->tx_coal_usec = SQ_INTCOAL_USEC; fp->tx_coal_count = SQ_INTCOAL_NPKT; fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count); rc = fun_dl_port_register(netdev); if (rc) goto free_stats; fp->ktls_id = FUN_HCI_ID_INVALID; fun_ktls_init(netdev); /* optional, failure OK */ netif_carrier_off(netdev); ed->netdevs[portid] = netdev; rc = register_netdev(netdev); if (rc) goto unreg_devlink; return 0; unreg_devlink: ed->netdevs[portid] = NULL; fun_ktls_cleanup(fp); devlink_port_unregister(&fp->dl_port); free_stats: fun_free_stats_area(fp); free_rss: fun_free_rss(fp); destroy_port: fun_port_destroy(netdev); free_netdev: free_netdev(netdev); done: dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc); return rc; } static void fun_destroy_netdev(struct net_device *netdev) { struct funeth_priv *fp; fp = netdev_priv(netdev); unregister_netdev(netdev); devlink_port_unregister(&fp->dl_port); fun_ktls_cleanup(fp); fun_free_stats_area(fp); fun_free_rss(fp); fun_port_destroy(netdev); free_netdev(netdev); } static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports) { struct fun_dev *fd = &ed->fdev; int i, rc; /* The admin queue takes 1 IRQ and 2 SQs. */ ed->nsqs_per_port = min(fd->num_irqs - 1, fd->kern_end_qid - 2) / nports; if (ed->nsqs_per_port < 2) { dev_err(fd->dev, "Too few SQs for %u ports", nports); return -EINVAL; } ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL); if (!ed->netdevs) return -ENOMEM; ed->num_ports = nports; for (i = 0; i < nports; i++) { rc = fun_create_netdev(ed, i); if (rc) goto free_netdevs; } return 0; free_netdevs: while (i) fun_destroy_netdev(ed->netdevs[--i]); kfree(ed->netdevs); ed->netdevs = NULL; ed->num_ports = 0; return rc; } static void fun_destroy_ports(struct fun_ethdev *ed) { unsigned int i; for (i = 0; i < ed->num_ports; i++) fun_destroy_netdev(ed->netdevs[i]); kfree(ed->netdevs); ed->netdevs = NULL; ed->num_ports = 0; } static void fun_update_link_state(const struct fun_ethdev *ed, const struct fun_admin_port_notif *notif) { unsigned int port_idx = be16_to_cpu(notif->id); struct net_device *netdev; struct funeth_priv *fp; if (port_idx >= ed->num_ports) return; netdev = ed->netdevs[port_idx]; fp = netdev_priv(netdev); write_seqcount_begin(&fp->link_seq); fp->link_speed = be32_to_cpu(notif->speed) * 10; /* 10 Mbps->Mbps */ fp->active_fc = notif->flow_ctrl; fp->active_fec = notif->fec; fp->xcvr_type = notif->xcvr_type; fp->link_down_reason = notif->link_down_reason; fp->lp_advertising = be64_to_cpu(notif->lp_advertising); if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN) netif_carrier_off(netdev); if (notif->link_state & FUN_PORT_FLAG_MAC_UP) netif_carrier_on(netdev); write_seqcount_end(&fp->link_seq); fun_report_link(netdev); } /* handler for async events delivered through the admin CQ */ static void fun_event_cb(struct fun_dev *fdev, void *entry) { u8 op = ((struct fun_admin_rsp_common *)entry)->op; if (op == FUN_ADMIN_OP_PORT) { const struct fun_admin_port_notif *rsp = entry; if 
(rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) { fun_update_link_state(to_fun_ethdev(fdev), rsp); } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) { const struct fun_admin_res_count_rsp *r = entry; if (r->count.data) set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags); else set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags); fun_serv_sched(fdev); } else { dev_info(fdev->dev, "adminq event unexpected op %u subop %u", op, rsp->subop); } } else { dev_info(fdev->dev, "adminq event unexpected op %u", op); } } /* handler for pending work managed by the service task */ static void fun_service_cb(struct fun_dev *fdev) { struct fun_ethdev *ed = to_fun_ethdev(fdev); int rc; if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags)) fun_destroy_ports(ed); if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags)) return; rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT); if (rc < 0 || rc == ed->num_ports) return; if (ed->num_ports) fun_destroy_ports(ed); if (rc) fun_create_ports(ed, rc); } static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs) { struct fun_dev *fdev = pci_get_drvdata(pdev); struct fun_ethdev *ed = to_fun_ethdev(fdev); int rc; if (nvfs == 0) { if (pci_vfs_assigned(pdev)) { dev_warn(&pdev->dev, "Cannot disable SR-IOV while VFs are assigned\n"); return -EPERM; } mutex_lock(&ed->state_mutex); fun_free_vports(ed); mutex_unlock(&ed->state_mutex); pci_disable_sriov(pdev); return 0; } rc = pci_enable_sriov(pdev, nvfs); if (rc) return rc; mutex_lock(&ed->state_mutex); rc = fun_init_vports(ed, nvfs); mutex_unlock(&ed->state_mutex); if (rc) { pci_disable_sriov(pdev); return rc; } return nvfs; } static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct fun_dev_params aqreq = { .cqe_size_log2 = ilog2(ADMIN_CQE_SIZE), .sqe_size_log2 = ilog2(ADMIN_SQE_SIZE), .cq_depth = ADMIN_CQ_DEPTH, .sq_depth = ADMIN_SQ_DEPTH, .rq_depth = ADMIN_RQ_DEPTH, .min_msix = 2, /* 1 Rx + 1 Tx */ .event_cb = fun_event_cb, .serv_cb = fun_service_cb, }; struct devlink *devlink; struct fun_ethdev *ed; struct fun_dev *fdev; int rc; devlink = fun_devlink_alloc(&pdev->dev); if (!devlink) { dev_err(&pdev->dev, "devlink alloc failed\n"); return -ENOMEM; } ed = devlink_priv(devlink); mutex_init(&ed->state_mutex); fdev = &ed->fdev; rc = fun_dev_enable(fdev, pdev, &aqreq, KBUILD_MODNAME); if (rc) goto free_devlink; rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT); if (rc > 0) rc = fun_create_ports(ed, rc); if (rc < 0) goto disable_dev; fun_serv_restart(fdev); fun_devlink_register(devlink); return 0; disable_dev: fun_dev_disable(fdev); free_devlink: mutex_destroy(&ed->state_mutex); fun_devlink_free(devlink); return rc; } static void funeth_remove(struct pci_dev *pdev) { struct fun_dev *fdev = pci_get_drvdata(pdev); struct devlink *devlink; struct fun_ethdev *ed; ed = to_fun_ethdev(fdev); devlink = priv_to_devlink(ed); fun_devlink_unregister(devlink); #ifdef CONFIG_PCI_IOV funeth_sriov_configure(pdev, 0); #endif fun_serv_stop(fdev); fun_destroy_ports(ed); fun_dev_disable(fdev); mutex_destroy(&ed->state_mutex); fun_devlink_free(devlink); } static struct pci_driver funeth_driver = { .name = KBUILD_MODNAME, .id_table = funeth_id_table, .probe = funeth_probe, .remove = funeth_remove, .shutdown = funeth_remove, .sriov_configure = funeth_sriov_configure, }; module_pci_driver(funeth_driver); MODULE_AUTHOR("Dimitris Michailidis <[email protected]>"); MODULE_DESCRIPTION("Fungible Ethernet Network Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DEVICE_TABLE(pci, funeth_id_table);
linux-master
drivers/net/ethernet/fungible/funeth/funeth_main.c
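The two helpers above that pick the default queue counts (fun_queue_defaults()) and build the default RSS indirection table (fun_reset_rss_indir()/fun_dflt_rss_indir()) are easy to model outside the kernel. Below is a minimal userspace sketch of that arithmetic; the DFLT_QUEUES and RSS_MAX_INDIR_ENT constants are stand-ins assumed only for this example and need not match the driver's actual FUN_DFLT_QUEUES and FUN_ETH_RSS_MAX_INDIR_ENT values.

/*
 * Editor's illustrative sketch (not part of the driver): a standalone model
 * of how funeth splits the available SQs between Tx and Rx and how it sizes
 * the default RSS indirection table so every Rx queue appears equally often.
 */
#include <stdio.h>

#define DFLT_QUEUES        16   /* stand-in for FUN_DFLT_QUEUES, assumed */
#define RSS_MAX_INDIR_ENT  64   /* stand-in for FUN_ETH_RSS_MAX_INDIR_ENT, assumed */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Mirrors fun_queue_defaults(): cap each side, then split nsqs between them. */
static void queue_defaults(unsigned int num_tx, unsigned int num_rx,
			   unsigned int nsqs, unsigned int *ntx,
			   unsigned int *nrx)
{
	*ntx = min_u(num_tx, DFLT_QUEUES);
	*nrx = min_u(num_rx, DFLT_QUEUES);
	if (*ntx <= *nrx) {
		*ntx = min_u(*ntx, nsqs / 2);
		*nrx = min_u(*nrx, nsqs - *ntx);
	} else {
		*nrx = min_u(*nrx, nsqs / 2);
		*ntx = min_u(*ntx, nsqs - *nrx);
	}
}

/* Mirrors fun_reset_rss_indir()/fun_dflt_rss_indir(): the table is trimmed to
 * the largest multiple of nrx that fits, and entry i maps to queue i % nrx.
 */
static unsigned int dflt_rss_indir(unsigned int nrx, unsigned int *table)
{
	unsigned int nentries = RSS_MAX_INDIR_ENT - (RSS_MAX_INDIR_ENT % nrx);
	unsigned int i;

	for (i = 0; i < nentries; i++)
		table[i] = i % nrx;
	return nentries;
}

int main(void)
{
	unsigned int table[RSS_MAX_INDIR_ENT];
	unsigned int ntx, nrx, n, i;

	queue_defaults(32, 32, 22, &ntx, &nrx);	/* e.g. 22 SQs available */
	printf("defaults: %u Tx, %u Rx queues\n", ntx, nrx);

	n = dflt_rss_indir(nrx, table);
	printf("RSS table has %u entries:", n);
	for (i = 0; i < n; i++)
		printf(" %u", table[i]);
	printf("\n");
	return 0;
}

With 22 SQs available the split comes out as 11 Tx and 11 Rx queues, and the 64-entry table is trimmed to 55 entries so each of the 11 queues appears exactly 5 times.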
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) #include <linux/ethtool.h> #include <linux/linkmode.h> #include <linux/netdevice.h> #include <linux/nvme.h> #include <linux/io.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/pci.h> #include <linux/rtnetlink.h> #include "funeth.h" #include "fun_port.h" #include "funeth_txrx.h" /* Min queue depth. The smallest power-of-2 supporting jumbo frames with 4K * pages is 8. Require it for all types of queues though some could work with * fewer entries. */ #define FUNETH_MIN_QDEPTH 8 static const char mac_tx_stat_names[][ETH_GSTRING_LEN] = { "mac_tx_octets_total", "mac_tx_frames_total", "mac_tx_vlan_frames_ok", "mac_tx_unicast_frames", "mac_tx_multicast_frames", "mac_tx_broadcast_frames", "mac_tx_errors", "mac_tx_CBFCPAUSE0", "mac_tx_CBFCPAUSE1", "mac_tx_CBFCPAUSE2", "mac_tx_CBFCPAUSE3", "mac_tx_CBFCPAUSE4", "mac_tx_CBFCPAUSE5", "mac_tx_CBFCPAUSE6", "mac_tx_CBFCPAUSE7", "mac_tx_CBFCPAUSE8", "mac_tx_CBFCPAUSE9", "mac_tx_CBFCPAUSE10", "mac_tx_CBFCPAUSE11", "mac_tx_CBFCPAUSE12", "mac_tx_CBFCPAUSE13", "mac_tx_CBFCPAUSE14", "mac_tx_CBFCPAUSE15", }; static const char mac_rx_stat_names[][ETH_GSTRING_LEN] = { "mac_rx_octets_total", "mac_rx_frames_total", "mac_rx_VLAN_frames_ok", "mac_rx_unicast_frames", "mac_rx_multicast_frames", "mac_rx_broadcast_frames", "mac_rx_drop_events", "mac_rx_errors", "mac_rx_alignment_errors", "mac_rx_CBFCPAUSE0", "mac_rx_CBFCPAUSE1", "mac_rx_CBFCPAUSE2", "mac_rx_CBFCPAUSE3", "mac_rx_CBFCPAUSE4", "mac_rx_CBFCPAUSE5", "mac_rx_CBFCPAUSE6", "mac_rx_CBFCPAUSE7", "mac_rx_CBFCPAUSE8", "mac_rx_CBFCPAUSE9", "mac_rx_CBFCPAUSE10", "mac_rx_CBFCPAUSE11", "mac_rx_CBFCPAUSE12", "mac_rx_CBFCPAUSE13", "mac_rx_CBFCPAUSE14", "mac_rx_CBFCPAUSE15", }; static const char * const txq_stat_names[] = { "tx_pkts", "tx_bytes", "tx_cso", "tx_tso", "tx_encapsulated_tso", "tx_uso", "tx_more", "tx_queue_stops", "tx_queue_restarts", "tx_mapping_errors", "tx_tls_encrypted_packets", "tx_tls_encrypted_bytes", "tx_tls_ooo", "tx_tls_drop_no_sync_data", }; static const char * const xdpq_stat_names[] = { "tx_xdp_pkts", "tx_xdp_bytes", "tx_xdp_full", "tx_xdp_mapping_errors", }; static const char * const rxq_stat_names[] = { "rx_pkts", "rx_bytes", "rx_cso", "gro_pkts", "gro_merged", "rx_xdp_tx", "rx_xdp_redir", "rx_xdp_drops", "rx_buffers", "rx_page_allocs", "rx_drops", "rx_budget_exhausted", "rx_mapping_errors", }; static const char * const tls_stat_names[] = { "tx_tls_ctx", "tx_tls_del", "tx_tls_resync", }; static void fun_link_modes_to_ethtool(u64 modes, unsigned long *ethtool_modes_map) { #define ADD_LINK_MODE(mode) \ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, ethtool_modes_map) if (modes & FUN_PORT_CAP_AUTONEG) ADD_LINK_MODE(Autoneg); if (modes & FUN_PORT_CAP_1000_X) ADD_LINK_MODE(1000baseX_Full); if (modes & FUN_PORT_CAP_10G_R) { ADD_LINK_MODE(10000baseCR_Full); ADD_LINK_MODE(10000baseSR_Full); ADD_LINK_MODE(10000baseLR_Full); ADD_LINK_MODE(10000baseER_Full); } if (modes & FUN_PORT_CAP_25G_R) { ADD_LINK_MODE(25000baseCR_Full); ADD_LINK_MODE(25000baseSR_Full); } if (modes & FUN_PORT_CAP_40G_R4) { ADD_LINK_MODE(40000baseCR4_Full); ADD_LINK_MODE(40000baseSR4_Full); ADD_LINK_MODE(40000baseLR4_Full); } if (modes & FUN_PORT_CAP_50G_R2) { ADD_LINK_MODE(50000baseCR2_Full); ADD_LINK_MODE(50000baseSR2_Full); } if (modes & FUN_PORT_CAP_50G_R) { ADD_LINK_MODE(50000baseCR_Full); ADD_LINK_MODE(50000baseSR_Full); ADD_LINK_MODE(50000baseLR_ER_FR_Full); } if (modes & FUN_PORT_CAP_100G_R4) { ADD_LINK_MODE(100000baseCR4_Full); 
ADD_LINK_MODE(100000baseSR4_Full); ADD_LINK_MODE(100000baseLR4_ER4_Full); } if (modes & FUN_PORT_CAP_100G_R2) { ADD_LINK_MODE(100000baseCR2_Full); ADD_LINK_MODE(100000baseSR2_Full); ADD_LINK_MODE(100000baseLR2_ER2_FR2_Full); } if (modes & FUN_PORT_CAP_FEC_NONE) ADD_LINK_MODE(FEC_NONE); if (modes & FUN_PORT_CAP_FEC_FC) ADD_LINK_MODE(FEC_BASER); if (modes & FUN_PORT_CAP_FEC_RS) ADD_LINK_MODE(FEC_RS); if (modes & FUN_PORT_CAP_RX_PAUSE) ADD_LINK_MODE(Pause); #undef ADD_LINK_MODE } static void set_asym_pause(u64 advertising, struct ethtool_link_ksettings *ks) { bool rx_pause, tx_pause; rx_pause = advertising & FUN_PORT_CAP_RX_PAUSE; tx_pause = advertising & FUN_PORT_CAP_TX_PAUSE; if (tx_pause ^ rx_pause) ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); } static unsigned int fun_port_type(unsigned int xcvr) { if (!xcvr) return PORT_NONE; switch (xcvr & 7) { case FUN_XCVR_BASET: return PORT_TP; case FUN_XCVR_CU: return PORT_DA; default: return PORT_FIBRE; } } static int fun_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ks) { const struct funeth_priv *fp = netdev_priv(netdev); unsigned int seq, speed, xcvr; u64 lp_advertising; bool link_up; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); /* Link settings change asynchronously, take a consistent snapshot */ do { seq = read_seqcount_begin(&fp->link_seq); link_up = netif_carrier_ok(netdev); speed = fp->link_speed; xcvr = fp->xcvr_type; lp_advertising = fp->lp_advertising; } while (read_seqcount_retry(&fp->link_seq, seq)); if (link_up) { ks->base.speed = speed; ks->base.duplex = DUPLEX_FULL; fun_link_modes_to_ethtool(lp_advertising, ks->link_modes.lp_advertising); } else { ks->base.speed = SPEED_UNKNOWN; ks->base.duplex = DUPLEX_UNKNOWN; } ks->base.autoneg = (fp->advertising & FUN_PORT_CAP_AUTONEG) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; ks->base.port = fun_port_type(xcvr); fun_link_modes_to_ethtool(fp->port_caps, ks->link_modes.supported); if (fp->port_caps & (FUN_PORT_CAP_RX_PAUSE | FUN_PORT_CAP_TX_PAUSE)) ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause); fun_link_modes_to_ethtool(fp->advertising, ks->link_modes.advertising); set_asym_pause(fp->advertising, ks); return 0; } static u64 fun_advert_modes(const struct ethtool_link_ksettings *ks) { u64 modes = 0; #define HAS_MODE(mode) \ ethtool_link_ksettings_test_link_mode(ks, advertising, mode) if (HAS_MODE(1000baseX_Full)) modes |= FUN_PORT_CAP_1000_X; if (HAS_MODE(10000baseCR_Full) || HAS_MODE(10000baseSR_Full) || HAS_MODE(10000baseLR_Full) || HAS_MODE(10000baseER_Full)) modes |= FUN_PORT_CAP_10G_R; if (HAS_MODE(25000baseCR_Full) || HAS_MODE(25000baseSR_Full)) modes |= FUN_PORT_CAP_25G_R; if (HAS_MODE(40000baseCR4_Full) || HAS_MODE(40000baseSR4_Full) || HAS_MODE(40000baseLR4_Full)) modes |= FUN_PORT_CAP_40G_R4; if (HAS_MODE(50000baseCR2_Full) || HAS_MODE(50000baseSR2_Full)) modes |= FUN_PORT_CAP_50G_R2; if (HAS_MODE(50000baseCR_Full) || HAS_MODE(50000baseSR_Full) || HAS_MODE(50000baseLR_ER_FR_Full)) modes |= FUN_PORT_CAP_50G_R; if (HAS_MODE(100000baseCR4_Full) || HAS_MODE(100000baseSR4_Full) || HAS_MODE(100000baseLR4_ER4_Full)) modes |= FUN_PORT_CAP_100G_R4; if (HAS_MODE(100000baseCR2_Full) || HAS_MODE(100000baseSR2_Full) || HAS_MODE(100000baseLR2_ER2_FR2_Full)) modes |= FUN_PORT_CAP_100G_R2; return modes; #undef HAS_MODE } static u64 fun_speed_to_link_mode(unsigned int speed) { switch (speed) { case SPEED_100000: return FUN_PORT_CAP_100G_R4 | FUN_PORT_CAP_100G_R2; case SPEED_50000: return FUN_PORT_CAP_50G_R | FUN_PORT_CAP_50G_R2; case SPEED_40000: return FUN_PORT_CAP_40G_R4; case SPEED_25000: return FUN_PORT_CAP_25G_R; case SPEED_10000: return FUN_PORT_CAP_10G_R; case SPEED_1000: return FUN_PORT_CAP_1000_X; default: return 0; } } static int fun_change_advert(struct funeth_priv *fp, u64 new_advert) { int err; if (new_advert == fp->advertising) return 0; err = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, new_advert); if (!err) fp->advertising = new_advert; return err; } #define FUN_PORT_CAP_FEC_MASK \ (FUN_PORT_CAP_FEC_NONE | FUN_PORT_CAP_FEC_FC | FUN_PORT_CAP_FEC_RS) static int fun_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *ks) { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {}; struct funeth_priv *fp = netdev_priv(netdev); u64 new_advert; /* eswitch ports don't support mode changes */ if (fp->port_caps & FUN_PORT_CAP_VPORT) return -EOPNOTSUPP; if (ks->base.duplex == DUPLEX_HALF) return -EINVAL; if (ks->base.autoneg == AUTONEG_ENABLE && !(fp->port_caps & FUN_PORT_CAP_AUTONEG)) return -EINVAL; if (ks->base.autoneg == AUTONEG_ENABLE) { if (linkmode_empty(ks->link_modes.advertising)) return -EINVAL; fun_link_modes_to_ethtool(fp->port_caps, supported); if (!linkmode_subset(ks->link_modes.advertising, supported)) return -EINVAL; new_advert = fun_advert_modes(ks) | FUN_PORT_CAP_AUTONEG; } else { new_advert = fun_speed_to_link_mode(ks->base.speed); new_advert &= fp->port_caps; if (!new_advert) return -EINVAL; } new_advert |= fp->advertising & (FUN_PORT_CAP_PAUSE_MASK | FUN_PORT_CAP_FEC_MASK); return fun_change_advert(fp, new_advert); } static void fun_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { const struct funeth_priv *fp = netdev_priv(netdev); u8 active_pause = fp->active_fc; pause->rx_pause = !!(active_pause & FUN_PORT_CAP_RX_PAUSE); pause->tx_pause 
= !!(active_pause & FUN_PORT_CAP_TX_PAUSE); pause->autoneg = !!(fp->advertising & FUN_PORT_CAP_AUTONEG); } static int fun_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct funeth_priv *fp = netdev_priv(netdev); u64 new_advert; if (fp->port_caps & FUN_PORT_CAP_VPORT) return -EOPNOTSUPP; /* Forcing PAUSE settings with AN enabled is unsupported. */ if (!pause->autoneg && (fp->advertising & FUN_PORT_CAP_AUTONEG)) return -EOPNOTSUPP; if (pause->autoneg && !(fp->advertising & FUN_PORT_CAP_AUTONEG)) return -EINVAL; if (pause->tx_pause && !(fp->port_caps & FUN_PORT_CAP_TX_PAUSE)) return -EINVAL; if (pause->rx_pause && !(fp->port_caps & FUN_PORT_CAP_RX_PAUSE)) return -EINVAL; new_advert = fp->advertising & ~FUN_PORT_CAP_PAUSE_MASK; if (pause->tx_pause) new_advert |= FUN_PORT_CAP_TX_PAUSE; if (pause->rx_pause) new_advert |= FUN_PORT_CAP_RX_PAUSE; return fun_change_advert(fp, new_advert); } static int fun_restart_an(struct net_device *netdev) { struct funeth_priv *fp = netdev_priv(netdev); if (!(fp->advertising & FUN_PORT_CAP_AUTONEG)) return -EOPNOTSUPP; return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, FUN_PORT_CAP_AUTONEG); } static int fun_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct funeth_priv *fp = netdev_priv(netdev); unsigned int beacon; if (fp->port_caps & FUN_PORT_CAP_VPORT) return -EOPNOTSUPP; if (state != ETHTOOL_ID_ACTIVE && state != ETHTOOL_ID_INACTIVE) return -EOPNOTSUPP; beacon = state == ETHTOOL_ID_ACTIVE ? FUN_PORT_LED_BEACON_ON : FUN_PORT_LED_BEACON_OFF; return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_LED, beacon); } static void fun_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { const struct funeth_priv *fp = netdev_priv(netdev); strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strscpy(info->bus_info, pci_name(fp->pdev), sizeof(info->bus_info)); } static u32 fun_get_msglevel(struct net_device *netdev) { const struct funeth_priv *fp = netdev_priv(netdev); return fp->msg_enable; } static void fun_set_msglevel(struct net_device *netdev, u32 value) { struct funeth_priv *fp = netdev_priv(netdev); fp->msg_enable = value; } static int fun_get_regs_len(struct net_device *dev) { return NVME_REG_ACQ + sizeof(u64); } static void fun_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { const struct funeth_priv *fp = netdev_priv(dev); void __iomem *bar = fp->fdev->bar; regs->version = 0; *(u64 *)(buf + NVME_REG_CAP) = readq(bar + NVME_REG_CAP); *(u32 *)(buf + NVME_REG_VS) = readl(bar + NVME_REG_VS); *(u32 *)(buf + NVME_REG_INTMS) = readl(bar + NVME_REG_INTMS); *(u32 *)(buf + NVME_REG_INTMC) = readl(bar + NVME_REG_INTMC); *(u32 *)(buf + NVME_REG_CC) = readl(bar + NVME_REG_CC); *(u32 *)(buf + NVME_REG_CSTS) = readl(bar + NVME_REG_CSTS); *(u32 *)(buf + NVME_REG_AQA) = readl(bar + NVME_REG_AQA); *(u64 *)(buf + NVME_REG_ASQ) = readq(bar + NVME_REG_ASQ); *(u64 *)(buf + NVME_REG_ACQ) = readq(bar + NVME_REG_ACQ); } static int fun_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kcoal, struct netlink_ext_ack *ext_ack) { const struct funeth_priv *fp = netdev_priv(netdev); coal->rx_coalesce_usecs = fp->rx_coal_usec; coal->rx_max_coalesced_frames = fp->rx_coal_count; coal->use_adaptive_rx_coalesce = !fp->cq_irq_db; coal->tx_coalesce_usecs = fp->tx_coal_usec; coal->tx_max_coalesced_frames = fp->tx_coal_count; return 0; } static int fun_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, struct 
kernel_ethtool_coalesce *kcoal, struct netlink_ext_ack *ext_ack) { struct funeth_priv *fp = netdev_priv(netdev); struct funeth_rxq **rxqs; unsigned int i, db_val; if (coal->rx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M || coal->rx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M || (coal->rx_coalesce_usecs | coal->rx_max_coalesced_frames) == 0 || coal->tx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M || coal->tx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M || (coal->tx_coalesce_usecs | coal->tx_max_coalesced_frames) == 0) return -EINVAL; /* a timer is required if there's any coalescing */ if ((coal->rx_max_coalesced_frames > 1 && !coal->rx_coalesce_usecs) || (coal->tx_max_coalesced_frames > 1 && !coal->tx_coalesce_usecs)) return -EINVAL; fp->rx_coal_usec = coal->rx_coalesce_usecs; fp->rx_coal_count = coal->rx_max_coalesced_frames; fp->tx_coal_usec = coal->tx_coalesce_usecs; fp->tx_coal_count = coal->tx_max_coalesced_frames; db_val = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count); WRITE_ONCE(fp->cq_irq_db, db_val); rxqs = rtnl_dereference(fp->rxqs); if (!rxqs) return 0; for (i = 0; i < netdev->real_num_rx_queues; i++) WRITE_ONCE(rxqs[i]->irq_db_val, db_val); db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count); for (i = 0; i < netdev->real_num_tx_queues; i++) WRITE_ONCE(fp->txqs[i]->irq_db_val, db_val); return 0; } static void fun_get_channels(struct net_device *netdev, struct ethtool_channels *chan) { chan->max_rx = netdev->num_rx_queues; chan->rx_count = netdev->real_num_rx_queues; chan->max_tx = netdev->num_tx_queues; chan->tx_count = netdev->real_num_tx_queues; } static int fun_set_channels(struct net_device *netdev, struct ethtool_channels *chan) { if (!chan->tx_count || !chan->rx_count) return -EINVAL; if (chan->tx_count == netdev->real_num_tx_queues && chan->rx_count == netdev->real_num_rx_queues) return 0; if (netif_running(netdev)) return fun_change_num_queues(netdev, chan->tx_count, chan->rx_count); fun_set_ring_count(netdev, chan->tx_count, chan->rx_count); return 0; } static void fun_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kring, struct netlink_ext_ack *extack) { const struct funeth_priv *fp = netdev_priv(netdev); unsigned int max_depth = fp->fdev->q_depth; /* We size CQs to be twice the RQ depth so max RQ depth is half the * max queue depth. 
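 * The Tx SQ may use the full device queue depth.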
*/ ring->rx_max_pending = max_depth / 2; ring->tx_max_pending = max_depth; ring->rx_pending = fp->rq_depth; ring->tx_pending = fp->sq_depth; kring->rx_buf_len = PAGE_SIZE; kring->cqe_size = FUNETH_CQE_SIZE; } static int fun_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kring, struct netlink_ext_ack *extack) { struct funeth_priv *fp = netdev_priv(netdev); int rc; if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; /* queue depths must be powers-of-2 */ if (!is_power_of_2(ring->rx_pending) || !is_power_of_2(ring->tx_pending)) return -EINVAL; if (ring->rx_pending < FUNETH_MIN_QDEPTH || ring->tx_pending < FUNETH_MIN_QDEPTH) return -EINVAL; if (fp->sq_depth == ring->tx_pending && fp->rq_depth == ring->rx_pending) return 0; if (netif_running(netdev)) { struct fun_qset req = { .cq_depth = 2 * ring->rx_pending, .rq_depth = ring->rx_pending, .sq_depth = ring->tx_pending }; rc = fun_replace_queues(netdev, &req, extack); if (rc) return rc; } fp->sq_depth = ring->tx_pending; fp->rq_depth = ring->rx_pending; fp->cq_depth = 2 * fp->rq_depth; return 0; } static int fun_get_sset_count(struct net_device *dev, int sset) { const struct funeth_priv *fp = netdev_priv(dev); int n; switch (sset) { case ETH_SS_STATS: n = (dev->real_num_tx_queues + 1) * ARRAY_SIZE(txq_stat_names) + (dev->real_num_rx_queues + 1) * ARRAY_SIZE(rxq_stat_names) + (fp->num_xdpqs + 1) * ARRAY_SIZE(xdpq_stat_names) + ARRAY_SIZE(tls_stat_names); if (fp->port_caps & FUN_PORT_CAP_STATS) { n += ARRAY_SIZE(mac_tx_stat_names) + ARRAY_SIZE(mac_rx_stat_names); } return n; default: break; } return 0; } static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data) { const struct funeth_priv *fp = netdev_priv(netdev); unsigned int i, j; u8 *p = data; switch (sset) { case ETH_SS_STATS: if (fp->port_caps & FUN_PORT_CAP_STATS) { memcpy(p, mac_tx_stat_names, sizeof(mac_tx_stat_names)); p += sizeof(mac_tx_stat_names); memcpy(p, mac_rx_stat_names, sizeof(mac_rx_stat_names)); p += sizeof(mac_rx_stat_names); } for (i = 0; i < netdev->real_num_tx_queues; i++) { for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++) ethtool_sprintf(&p, "%s[%u]", txq_stat_names[j], i); } for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++) ethtool_sprintf(&p, txq_stat_names[j]); for (i = 0; i < fp->num_xdpqs; i++) { for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++) ethtool_sprintf(&p, "%s[%u]", xdpq_stat_names[j], i); } for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++) ethtool_sprintf(&p, xdpq_stat_names[j]); for (i = 0; i < netdev->real_num_rx_queues; i++) { for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++) ethtool_sprintf(&p, "%s[%u]", rxq_stat_names[j], i); } for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++) ethtool_sprintf(&p, rxq_stat_names[j]); for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++) ethtool_sprintf(&p, tls_stat_names[j]); break; default: break; } } static u64 *get_mac_stats(const struct funeth_priv *fp, u64 *data) { #define TX_STAT(s) \ *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s]) TX_STAT(etherStatsOctets); TX_STAT(etherStatsPkts); TX_STAT(VLANTransmittedOK); TX_STAT(ifOutUcastPkts); TX_STAT(ifOutMulticastPkts); TX_STAT(ifOutBroadcastPkts); TX_STAT(ifOutErrors); TX_STAT(CBFCPAUSEFramesTransmitted_0); TX_STAT(CBFCPAUSEFramesTransmitted_1); TX_STAT(CBFCPAUSEFramesTransmitted_2); TX_STAT(CBFCPAUSEFramesTransmitted_3); TX_STAT(CBFCPAUSEFramesTransmitted_4); TX_STAT(CBFCPAUSEFramesTransmitted_5); TX_STAT(CBFCPAUSEFramesTransmitted_6); 
TX_STAT(CBFCPAUSEFramesTransmitted_7); TX_STAT(CBFCPAUSEFramesTransmitted_8); TX_STAT(CBFCPAUSEFramesTransmitted_9); TX_STAT(CBFCPAUSEFramesTransmitted_10); TX_STAT(CBFCPAUSEFramesTransmitted_11); TX_STAT(CBFCPAUSEFramesTransmitted_12); TX_STAT(CBFCPAUSEFramesTransmitted_13); TX_STAT(CBFCPAUSEFramesTransmitted_14); TX_STAT(CBFCPAUSEFramesTransmitted_15); #define RX_STAT(s) *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_##s]) RX_STAT(etherStatsOctets); RX_STAT(etherStatsPkts); RX_STAT(VLANReceivedOK); RX_STAT(ifInUcastPkts); RX_STAT(ifInMulticastPkts); RX_STAT(ifInBroadcastPkts); RX_STAT(etherStatsDropEvents); RX_STAT(ifInErrors); RX_STAT(aAlignmentErrors); RX_STAT(CBFCPAUSEFramesReceived_0); RX_STAT(CBFCPAUSEFramesReceived_1); RX_STAT(CBFCPAUSEFramesReceived_2); RX_STAT(CBFCPAUSEFramesReceived_3); RX_STAT(CBFCPAUSEFramesReceived_4); RX_STAT(CBFCPAUSEFramesReceived_5); RX_STAT(CBFCPAUSEFramesReceived_6); RX_STAT(CBFCPAUSEFramesReceived_7); RX_STAT(CBFCPAUSEFramesReceived_8); RX_STAT(CBFCPAUSEFramesReceived_9); RX_STAT(CBFCPAUSEFramesReceived_10); RX_STAT(CBFCPAUSEFramesReceived_11); RX_STAT(CBFCPAUSEFramesReceived_12); RX_STAT(CBFCPAUSEFramesReceived_13); RX_STAT(CBFCPAUSEFramesReceived_14); RX_STAT(CBFCPAUSEFramesReceived_15); return data; #undef TX_STAT #undef RX_STAT } static void fun_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { const struct funeth_priv *fp = netdev_priv(netdev); struct funeth_txq_stats txs; struct funeth_rxq_stats rxs; struct funeth_txq **xdpqs; struct funeth_rxq **rxqs; unsigned int i, start; u64 *totals, *tot; if (fp->port_caps & FUN_PORT_CAP_STATS) data = get_mac_stats(fp, data); rxqs = rtnl_dereference(fp->rxqs); if (!rxqs) return; #define ADD_STAT(cnt) do { \ *data = (cnt); *tot++ += *data++; \ } while (0) /* Tx queues */ totals = data + netdev->real_num_tx_queues * ARRAY_SIZE(txq_stat_names); for (i = 0; i < netdev->real_num_tx_queues; i++) { tot = totals; FUN_QSTAT_READ(fp->txqs[i], start, txs); ADD_STAT(txs.tx_pkts); ADD_STAT(txs.tx_bytes); ADD_STAT(txs.tx_cso); ADD_STAT(txs.tx_tso); ADD_STAT(txs.tx_encap_tso); ADD_STAT(txs.tx_uso); ADD_STAT(txs.tx_more); ADD_STAT(txs.tx_nstops); ADD_STAT(txs.tx_nrestarts); ADD_STAT(txs.tx_map_err); ADD_STAT(txs.tx_tls_pkts); ADD_STAT(txs.tx_tls_bytes); ADD_STAT(txs.tx_tls_fallback); ADD_STAT(txs.tx_tls_drops); } data += ARRAY_SIZE(txq_stat_names); /* XDP Tx queues */ xdpqs = rtnl_dereference(fp->xdpqs); totals = data + fp->num_xdpqs * ARRAY_SIZE(xdpq_stat_names); for (i = 0; i < fp->num_xdpqs; i++) { tot = totals; FUN_QSTAT_READ(xdpqs[i], start, txs); ADD_STAT(txs.tx_pkts); ADD_STAT(txs.tx_bytes); ADD_STAT(txs.tx_xdp_full); ADD_STAT(txs.tx_map_err); } data += ARRAY_SIZE(xdpq_stat_names); /* Rx queues */ totals = data + netdev->real_num_rx_queues * ARRAY_SIZE(rxq_stat_names); for (i = 0; i < netdev->real_num_rx_queues; i++) { tot = totals; FUN_QSTAT_READ(rxqs[i], start, rxs); ADD_STAT(rxs.rx_pkts); ADD_STAT(rxs.rx_bytes); ADD_STAT(rxs.rx_cso); ADD_STAT(rxs.gro_pkts); ADD_STAT(rxs.gro_merged); ADD_STAT(rxs.xdp_tx); ADD_STAT(rxs.xdp_redir); ADD_STAT(rxs.xdp_drops); ADD_STAT(rxs.rx_bufs); ADD_STAT(rxs.rx_page_alloc); ADD_STAT(rxs.rx_mem_drops + rxs.xdp_err); ADD_STAT(rxs.rx_budget); ADD_STAT(rxs.rx_map_err); } data += ARRAY_SIZE(rxq_stat_names); #undef ADD_STAT *data++ = atomic64_read(&fp->tx_tls_add); *data++ = atomic64_read(&fp->tx_tls_del); *data++ = atomic64_read(&fp->tx_tls_resync); } #define RX_STAT(fp, s) be64_to_cpu((fp)->stats[PORT_MAC_RX_##s]) #define TX_STAT(fp, s) \ 
be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s]) #define FEC_STAT(fp, s) \ be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + \ PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_##s]) static void fun_get_pause_stats(struct net_device *netdev, struct ethtool_pause_stats *stats) { const struct funeth_priv *fp = netdev_priv(netdev); if (!(fp->port_caps & FUN_PORT_CAP_STATS)) return; stats->tx_pause_frames = TX_STAT(fp, aPAUSEMACCtrlFramesTransmitted); stats->rx_pause_frames = RX_STAT(fp, aPAUSEMACCtrlFramesReceived); } static void fun_get_802_3_stats(struct net_device *netdev, struct ethtool_eth_mac_stats *stats) { const struct funeth_priv *fp = netdev_priv(netdev); if (!(fp->port_caps & FUN_PORT_CAP_STATS)) return; stats->FramesTransmittedOK = TX_STAT(fp, aFramesTransmittedOK); stats->FramesReceivedOK = RX_STAT(fp, aFramesReceivedOK); stats->FrameCheckSequenceErrors = RX_STAT(fp, aFrameCheckSequenceErrors); stats->OctetsTransmittedOK = TX_STAT(fp, OctetsTransmittedOK); stats->OctetsReceivedOK = RX_STAT(fp, OctetsReceivedOK); stats->InRangeLengthErrors = RX_STAT(fp, aInRangeLengthErrors); stats->FrameTooLongErrors = RX_STAT(fp, aFrameTooLongErrors); } static void fun_get_802_3_ctrl_stats(struct net_device *netdev, struct ethtool_eth_ctrl_stats *stats) { const struct funeth_priv *fp = netdev_priv(netdev); if (!(fp->port_caps & FUN_PORT_CAP_STATS)) return; stats->MACControlFramesTransmitted = TX_STAT(fp, MACControlFramesTransmitted); stats->MACControlFramesReceived = RX_STAT(fp, MACControlFramesReceived); } static void fun_get_rmon_stats(struct net_device *netdev, struct ethtool_rmon_stats *stats, const struct ethtool_rmon_hist_range **ranges) { static const struct ethtool_rmon_hist_range rmon_ranges[] = { { 64, 64 }, { 65, 127 }, { 128, 255 }, { 256, 511 }, { 512, 1023 }, { 1024, 1518 }, { 1519, 32767 }, {} }; const struct funeth_priv *fp = netdev_priv(netdev); if (!(fp->port_caps & FUN_PORT_CAP_STATS)) return; stats->undersize_pkts = RX_STAT(fp, etherStatsUndersizePkts); stats->oversize_pkts = RX_STAT(fp, etherStatsOversizePkts); stats->fragments = RX_STAT(fp, etherStatsFragments); stats->jabbers = RX_STAT(fp, etherStatsJabbers); stats->hist[0] = RX_STAT(fp, etherStatsPkts64Octets); stats->hist[1] = RX_STAT(fp, etherStatsPkts65to127Octets); stats->hist[2] = RX_STAT(fp, etherStatsPkts128to255Octets); stats->hist[3] = RX_STAT(fp, etherStatsPkts256to511Octets); stats->hist[4] = RX_STAT(fp, etherStatsPkts512to1023Octets); stats->hist[5] = RX_STAT(fp, etherStatsPkts1024to1518Octets); stats->hist[6] = RX_STAT(fp, etherStatsPkts1519toMaxOctets); stats->hist_tx[0] = TX_STAT(fp, etherStatsPkts64Octets); stats->hist_tx[1] = TX_STAT(fp, etherStatsPkts65to127Octets); stats->hist_tx[2] = TX_STAT(fp, etherStatsPkts128to255Octets); stats->hist_tx[3] = TX_STAT(fp, etherStatsPkts256to511Octets); stats->hist_tx[4] = TX_STAT(fp, etherStatsPkts512to1023Octets); stats->hist_tx[5] = TX_STAT(fp, etherStatsPkts1024to1518Octets); stats->hist_tx[6] = TX_STAT(fp, etherStatsPkts1519toMaxOctets); *ranges = rmon_ranges; } static void fun_get_fec_stats(struct net_device *netdev, struct ethtool_fec_stats *stats) { const struct funeth_priv *fp = netdev_priv(netdev); if (!(fp->port_caps & FUN_PORT_CAP_STATS)) return; stats->corrected_blocks.total = FEC_STAT(fp, Correctable); stats->uncorrectable_blocks.total = FEC_STAT(fp, Uncorrectable); } #undef RX_STAT #undef TX_STAT #undef FEC_STAT static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { switch (cmd->cmd) { case ETHTOOL_GRXRINGS: 
cmd->data = netdev->real_num_rx_queues; return 0; default: break; } return -EOPNOTSUPP; } static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) { return 0; } static u32 fun_get_rxfh_indir_size(struct net_device *netdev) { const struct funeth_priv *fp = netdev_priv(netdev); return fp->indir_table_nentries; } static u32 fun_get_rxfh_key_size(struct net_device *netdev) { const struct funeth_priv *fp = netdev_priv(netdev); return sizeof(fp->rss_key); } static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { const struct funeth_priv *fp = netdev_priv(netdev); if (!fp->rss_cfg) return -EOPNOTSUPP; if (indir) memcpy(indir, fp->indir_table, sizeof(u32) * fp->indir_table_nentries); if (key) memcpy(key, fp->rss_key, sizeof(fp->rss_key)); if (hfunc) *hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32; return 0; } static int fun_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct funeth_priv *fp = netdev_priv(netdev); const u32 *rss_indir = indir ? indir : fp->indir_table; const u8 *rss_key = key ? key : fp->rss_key; enum fun_eth_hash_alg algo; if (!fp->rss_cfg) return -EOPNOTSUPP; if (hfunc == ETH_RSS_HASH_NO_CHANGE) algo = fp->hash_algo; else if (hfunc == ETH_RSS_HASH_CRC32) algo = FUN_ETH_RSS_ALG_CRC32; else if (hfunc == ETH_RSS_HASH_TOP) algo = FUN_ETH_RSS_ALG_TOEPLITZ; else return -EINVAL; /* If the port is enabled try to reconfigure RSS and keep the new * settings if successful. If it is down we update the RSS settings * and apply them at the next UP time. */ if (netif_running(netdev)) { int rc = fun_config_rss(netdev, algo, rss_key, rss_indir, FUN_ADMIN_SUBOP_MODIFY); if (rc) return rc; } fp->hash_algo = algo; if (key) memcpy(fp->rss_key, key, sizeof(fp->rss_key)); if (indir) memcpy(fp->indir_table, indir, sizeof(u32) * fp->indir_table_nentries); return 0; } static int fun_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF); info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); return 0; } static unsigned int to_ethtool_fec(unsigned int fun_fec) { unsigned int fec = 0; if (fun_fec == FUN_PORT_FEC_NA) fec |= ETHTOOL_FEC_NONE; if (fun_fec & FUN_PORT_FEC_OFF) fec |= ETHTOOL_FEC_OFF; if (fun_fec & FUN_PORT_FEC_RS) fec |= ETHTOOL_FEC_RS; if (fun_fec & FUN_PORT_FEC_FC) fec |= ETHTOOL_FEC_BASER; if (fun_fec & FUN_PORT_FEC_AUTO) fec |= ETHTOOL_FEC_AUTO; return fec; } static int fun_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) { struct funeth_priv *fp = netdev_priv(netdev); u64 fec_data; int rc; rc = fun_port_read_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, &fec_data); if (rc) return rc; fec->active_fec = to_ethtool_fec(fec_data & 0xff); fec->fec = to_ethtool_fec(fec_data >> 8); return 0; } static int fun_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fec) { struct funeth_priv *fp = netdev_priv(netdev); u64 fec_mode; switch (fec->fec) { case ETHTOOL_FEC_AUTO: fec_mode = FUN_PORT_FEC_AUTO; break; case ETHTOOL_FEC_OFF: if (!(fp->port_caps & FUN_PORT_CAP_FEC_NONE)) return -EINVAL; fec_mode = FUN_PORT_FEC_OFF; break; case ETHTOOL_FEC_BASER: if (!(fp->port_caps & FUN_PORT_CAP_FEC_FC)) return -EINVAL; fec_mode = FUN_PORT_FEC_FC; break; case ETHTOOL_FEC_RS: if (!(fp->port_caps 
& FUN_PORT_CAP_FEC_RS)) return -EINVAL; fec_mode = FUN_PORT_FEC_RS; break; default: return -EINVAL; } return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode); } static int fun_get_port_module_page(struct net_device *netdev, const struct ethtool_module_eeprom *req, struct netlink_ext_ack *extack) { union { struct fun_admin_port_req req; struct fun_admin_port_xcvr_read_rsp rsp; } cmd; struct funeth_priv *fp = netdev_priv(netdev); int rc; if (fp->port_caps & FUN_PORT_CAP_VPORT) { NL_SET_ERR_MSG_MOD(extack, "Specified port is virtual, only physical ports have modules"); return -EOPNOTSUPP; } cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT, sizeof(cmd.req)); cmd.req.u.xcvr_read = FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(0, netdev->dev_port, req->bank, req->page, req->offset, req->length, req->i2c_address); rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, sizeof(cmd.rsp), 0); if (rc) return rc; memcpy(req->data, cmd.rsp.data, req->length); return req->length; } static const struct ethtool_ops fun_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .get_link_ksettings = fun_get_link_ksettings, .set_link_ksettings = fun_set_link_ksettings, .set_phys_id = fun_set_phys_id, .get_drvinfo = fun_get_drvinfo, .get_msglevel = fun_get_msglevel, .set_msglevel = fun_set_msglevel, .get_regs_len = fun_get_regs_len, .get_regs = fun_get_regs, .get_link = ethtool_op_get_link, .get_coalesce = fun_get_coalesce, .set_coalesce = fun_set_coalesce, .get_ts_info = fun_get_ts_info, .get_ringparam = fun_get_ringparam, .set_ringparam = fun_set_ringparam, .get_sset_count = fun_get_sset_count, .get_strings = fun_get_strings, .get_ethtool_stats = fun_get_ethtool_stats, .get_rxnfc = fun_get_rxnfc, .set_rxnfc = fun_set_rxnfc, .get_rxfh_indir_size = fun_get_rxfh_indir_size, .get_rxfh_key_size = fun_get_rxfh_key_size, .get_rxfh = fun_get_rxfh, .set_rxfh = fun_set_rxfh, .get_channels = fun_get_channels, .set_channels = fun_set_channels, .get_fecparam = fun_get_fecparam, .set_fecparam = fun_set_fecparam, .get_pauseparam = fun_get_pauseparam, .set_pauseparam = fun_set_pauseparam, .nway_reset = fun_restart_an, .get_pause_stats = fun_get_pause_stats, .get_fec_stats = fun_get_fec_stats, .get_eth_mac_stats = fun_get_802_3_stats, .get_eth_ctrl_stats = fun_get_802_3_ctrl_stats, .get_rmon_stats = fun_get_rmon_stats, .get_module_eeprom_by_page = fun_get_port_module_page, }; void fun_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &fun_ethtool_ops; }
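/*
 * Illustrative sketch appended by the editor; it is not part of the upstream
 * driver.  fun_get_sset_count(), fun_get_strings() and fun_get_ethtool_stats()
 * above must all agree on one layout: for each stat group, one row per queue
 * followed by a single "totals" row.  The hypothetical helper below computes
 * that count for two groups the same way fun_get_sset_count() does.
 */
static unsigned int example_ethtool_stat_count(unsigned int num_txq,
					       unsigned int num_tx_names,
					       unsigned int num_rxq,
					       unsigned int num_rx_names)
{
	/* (per-queue rows + one totals row) * names per row, per group */
	return (num_txq + 1) * num_tx_names + (num_rxq + 1) * num_rx_names;
}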
linux-master
drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) #include <linux/dma-mapping.h> #include <linux/ip.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/tcp.h> #include <uapi/linux/udp.h> #include "funeth.h" #include "funeth_ktls.h" #include "funeth_txrx.h" #include "funeth_trace.h" #include "fun_queue.h" #define FUN_XDP_CLEAN_THRES 32 #define FUN_XDP_CLEAN_BATCH 16 /* DMA-map a packet and return the (length, DMA_address) pairs for its * segments. If a mapping error occurs -ENOMEM is returned. The packet * consists of an skb_shared_info and one additional address/length pair. */ static int fun_map_pkt(struct device *dev, const struct skb_shared_info *si, void *data, unsigned int data_len, dma_addr_t *addr, unsigned int *len) { const skb_frag_t *fp, *end; *len = data_len; *addr = dma_map_single(dev, data, *len, DMA_TO_DEVICE); if (dma_mapping_error(dev, *addr)) return -ENOMEM; if (!si) return 0; for (fp = si->frags, end = fp + si->nr_frags; fp < end; fp++) { *++len = skb_frag_size(fp); *++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE); if (dma_mapping_error(dev, *addr)) goto unwind; } return 0; unwind: while (fp-- > si->frags) dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); dma_unmap_single(dev, addr[-1], data_len, DMA_TO_DEVICE); return -ENOMEM; } /* Return the address just past the end of a Tx queue's descriptor ring. * It exploits the fact that the HW writeback area is just after the end * of the descriptor ring. */ static void *txq_end(const struct funeth_txq *q) { return (void *)q->hw_wb; } /* Return the amount of space within a Tx ring from the given address to the * end. */ static unsigned int txq_to_end(const struct funeth_txq *q, void *p) { return txq_end(q) - p; } /* Return the number of Tx descriptors occupied by a Tx request. */ static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req) { return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8); } /* Write a gather list to the Tx descriptor at @req from @ngle address/length * pairs. 
*/ static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q, struct fun_eth_tx_req *req, const dma_addr_t *addrs, const unsigned int *lens, unsigned int ngle) { struct fun_dataop_gl *gle; unsigned int i; req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8; for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm; i < ngle && txq_to_end(q, gle); i++, gle++) fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]); if (txq_to_end(q, gle) == 0) { gle = (struct fun_dataop_gl *)q->desc; for ( ; i < ngle; i++, gle++) fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]); } return gle; } static __be16 tcp_hdr_doff_flags(const struct tcphdr *th) { return *(__be16 *)&tcp_flag_word(th); } static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q, unsigned int *tls_len) { #if IS_ENABLED(CONFIG_TLS_DEVICE) const struct fun_ktls_tx_ctx *tls_ctx; u32 datalen, seq; datalen = skb->len - skb_tcp_all_headers(skb); if (!datalen) return skb; if (likely(!tls_offload_tx_resync_pending(skb->sk))) { seq = ntohl(tcp_hdr(skb)->seq); tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX); if (likely(tls_ctx->next_seq == seq)) { *tls_len = datalen; return skb; } if (seq - tls_ctx->next_seq < U32_MAX / 4) { tls_offload_tx_resync_request(skb->sk, seq, tls_ctx->next_seq); } } FUN_QSTAT_INC(q, tx_tls_fallback); skb = tls_encrypt_skb(skb); if (!skb) FUN_QSTAT_INC(q, tx_tls_drops); return skb; #else return NULL; #endif } /* Write as many descriptors as needed for the supplied skb starting at the * current producer location. The caller has made certain enough descriptors * are available. * * Returns the number of descriptors written, 0 on error. */ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q, unsigned int tls_len) { unsigned int extra_bytes = 0, extra_pkts = 0; unsigned int idx = q->prod_cnt & q->mask; const struct skb_shared_info *shinfo; unsigned int lens[MAX_SKB_FRAGS + 1]; dma_addr_t addrs[MAX_SKB_FRAGS + 1]; struct fun_eth_tx_req *req; struct fun_dataop_gl *gle; const struct tcphdr *th; unsigned int l4_hlen; unsigned int ngle; u16 flags; shinfo = skb_shinfo(skb); if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data, skb_headlen(skb), addrs, lens))) { FUN_QSTAT_INC(q, tx_map_err); return 0; } req = fun_tx_desc_addr(q, idx); req->op = FUN_ETH_OP_TX; req->len8 = 0; req->flags = 0; req->suboff8 = offsetof(struct fun_eth_tx_req, dataop); req->repr_idn = 0; req->encap_proto = 0; if (likely(shinfo->gso_size)) { if (skb->encapsulation) { u16 ol4_ofst; flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO | FUN_ETH_UPDATE_INNER_L4_CKSUM | FUN_ETH_UPDATE_OUTER_L3_LEN; if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { flags |= FUN_ETH_UPDATE_OUTER_L4_LEN | FUN_ETH_OUTER_UDP; if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM; ol4_ofst = skb_transport_offset(skb); } else { ol4_ofst = skb_inner_network_offset(skb); } if (ip_hdr(skb)->version == 4) flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM; else flags |= FUN_ETH_OUTER_IPV6; if (skb->inner_network_header) { if (inner_ip_hdr(skb)->version == 4) flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM | FUN_ETH_UPDATE_INNER_L3_LEN; else flags |= FUN_ETH_INNER_IPV6 | FUN_ETH_UPDATE_INNER_L3_LEN; } th = inner_tcp_hdr(skb); l4_hlen = __tcp_hdrlen(th); fun_eth_offload_init(&req->offload, flags, shinfo->gso_size, tcp_hdr_doff_flags(th), 0, skb_inner_network_offset(skb), skb_inner_transport_offset(skb), skb_network_offset(skb), ol4_ofst); FUN_QSTAT_INC(q, tx_encap_tso); } else if (shinfo->gso_type & 
SKB_GSO_UDP_L4) { flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP | FUN_ETH_UPDATE_INNER_L4_CKSUM | FUN_ETH_UPDATE_INNER_L4_LEN | FUN_ETH_UPDATE_INNER_L3_LEN; if (ip_hdr(skb)->version == 4) flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM; else flags |= FUN_ETH_INNER_IPV6; l4_hlen = sizeof(struct udphdr); fun_eth_offload_init(&req->offload, flags, shinfo->gso_size, cpu_to_be16(l4_hlen << 10), 0, skb_network_offset(skb), skb_transport_offset(skb), 0, 0); FUN_QSTAT_INC(q, tx_uso); } else { /* HW considers one set of headers as inner */ flags = FUN_ETH_INNER_LSO | FUN_ETH_UPDATE_INNER_L4_CKSUM | FUN_ETH_UPDATE_INNER_L3_LEN; if (shinfo->gso_type & SKB_GSO_TCPV6) flags |= FUN_ETH_INNER_IPV6; else flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM; th = tcp_hdr(skb); l4_hlen = __tcp_hdrlen(th); fun_eth_offload_init(&req->offload, flags, shinfo->gso_size, tcp_hdr_doff_flags(th), 0, skb_network_offset(skb), skb_transport_offset(skb), 0, 0); FUN_QSTAT_INC(q, tx_tso); } u64_stats_update_begin(&q->syncp); q->stats.tx_cso += shinfo->gso_segs; u64_stats_update_end(&q->syncp); extra_pkts = shinfo->gso_segs - 1; extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) + l4_hlen) * extra_pkts; } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { flags = FUN_ETH_UPDATE_INNER_L4_CKSUM; if (skb->csum_offset == offsetof(struct udphdr, check)) flags |= FUN_ETH_INNER_UDP; fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0, skb_checksum_start_offset(skb), 0, 0); FUN_QSTAT_INC(q, tx_cso); } else { fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0); } ngle = shinfo->nr_frags + 1; req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len); gle = fun_write_gl(q, req, addrs, lens, ngle); if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) { struct fun_eth_tls *tls = (struct fun_eth_tls *)gle; struct fun_ktls_tx_ctx *tls_ctx; req->len8 += FUNETH_TLS_SZ / 8; req->flags = cpu_to_be16(FUN_ETH_TX_TLS); tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX); tls->tlsid = tls_ctx->tlsid; tls_ctx->next_seq += tls_len; u64_stats_update_begin(&q->syncp); q->stats.tx_tls_bytes += tls_len; q->stats.tx_tls_pkts += 1 + extra_pkts; u64_stats_update_end(&q->syncp); } u64_stats_update_begin(&q->syncp); q->stats.tx_bytes += skb->len + extra_bytes; q->stats.tx_pkts += 1 + extra_pkts; u64_stats_update_end(&q->syncp); q->info[idx].skb = skb; trace_funeth_tx(q, skb->len, idx, req->dataop.ngather); return tx_req_ndesc(req); } /* Return the number of available descriptors of a Tx queue. * HW assumes head==tail means the ring is empty so we need to keep one * descriptor unused. */ static unsigned int fun_txq_avail(const struct funeth_txq *q) { return q->mask - q->prod_cnt + q->cons_cnt; } /* Stop a queue if it can't handle another worst-case packet. */ static void fun_tx_check_stop(struct funeth_txq *q) { if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC)) return; netif_tx_stop_queue(q->ndq); /* NAPI reclaim is freeing packets in parallel with us and we may race. * We have stopped the queue but check again after synchronizing with * reclaim. */ smp_mb(); if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC)) FUN_QSTAT_INC(q, tx_nstops); else netif_tx_start_queue(q->ndq); } /* Return true if a queue has enough space to restart. Current condition is * that the queue must be >= 1/4 empty. 
*/ static bool fun_txq_may_restart(struct funeth_txq *q) { return fun_txq_avail(q) >= q->mask / 4; } netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct funeth_priv *fp = netdev_priv(netdev); unsigned int qid = skb_get_queue_mapping(skb); struct funeth_txq *q = fp->txqs[qid]; unsigned int tls_len = 0; unsigned int ndesc; if (tls_is_skb_tx_device_offloaded(skb)) { skb = fun_tls_tx(skb, q, &tls_len); if (unlikely(!skb)) goto dropped; } ndesc = write_pkt_desc(skb, q, tls_len); if (unlikely(!ndesc)) { dev_kfree_skb_any(skb); goto dropped; } q->prod_cnt += ndesc; fun_tx_check_stop(q); skb_tx_timestamp(skb); if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more())) fun_txq_wr_db(q); else FUN_QSTAT_INC(q, tx_more); return NETDEV_TX_OK; dropped: /* A dropped packet may be the last one in a xmit_more train, * ring the doorbell just in case. */ if (!netdev_xmit_more()) fun_txq_wr_db(q); return NETDEV_TX_OK; } /* Return a Tx queue's HW head index written back to host memory. */ static u16 txq_hw_head(const struct funeth_txq *q) { return (u16)be64_to_cpu(*q->hw_wb); } /* Unmap the Tx packet starting at the given descriptor index and * return the number of Tx descriptors it occupied. */ static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx) { const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx); unsigned int ngle = req->dataop.ngather; struct fun_dataop_gl *gle; if (ngle) { gle = (struct fun_dataop_gl *)req->dataop.imm; dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data), be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE); for (gle++; --ngle && txq_to_end(q, gle); gle++) dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data), be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE); for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++) dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data), be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE); } return tx_req_ndesc(req); } /* Reclaim completed Tx descriptors and free their packets. Restart a stopped * queue if we freed enough descriptors. * * Return true if we exhausted the budget while there is more work to be done. */ static bool fun_txq_reclaim(struct funeth_txq *q, int budget) { unsigned int npkts = 0, nbytes = 0, ndesc = 0; unsigned int head, limit, reclaim_idx; /* budget may be 0, e.g., netpoll */ limit = budget ? budget : UINT_MAX; for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask; head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) { /* The HW head is continually updated, ensure we don't read * descriptor state before the head tells us to reclaim it. * On the enqueue side the doorbell is an implicit write * barrier. */ rmb(); do { unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx); struct sk_buff *skb = q->info[reclaim_idx].skb; trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head); nbytes += skb->len; napi_consume_skb(skb, budget); ndesc += pkt_desc; reclaim_idx = (reclaim_idx + pkt_desc) & q->mask; npkts++; } while (reclaim_idx != head && npkts < limit); } q->cons_cnt += ndesc; netdev_tx_completed_queue(q->ndq, npkts, nbytes); smp_mb(); /* pairs with the one in fun_tx_check_stop() */ if (unlikely(netif_tx_queue_stopped(q->ndq) && fun_txq_may_restart(q))) { netif_tx_wake_queue(q->ndq); FUN_QSTAT_INC(q, tx_nrestarts); } return reclaim_idx != head; } /* The NAPI handler for Tx queues. 
*/ int fun_txq_napi_poll(struct napi_struct *napi, int budget) { struct fun_irq *irq = container_of(napi, struct fun_irq, napi); struct funeth_txq *q = irq->txq; unsigned int db_val; if (fun_txq_reclaim(q, budget)) return budget; /* exhausted budget */ napi_complete(napi); /* exhausted pending work */ db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask); writel(db_val, q->db); return 0; } /* Reclaim up to @budget completed Tx packets from a TX XDP queue. */ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget) { unsigned int npkts = 0, ndesc = 0, head, reclaim_idx; for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask; head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) { /* The HW head is continually updated, ensure we don't read * descriptor state before the head tells us to reclaim it. * On the enqueue side the doorbell is an implicit write * barrier. */ rmb(); do { unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx); xdp_return_frame(q->info[reclaim_idx].xdpf); trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head); reclaim_idx = (reclaim_idx + pkt_desc) & q->mask; ndesc += pkt_desc; npkts++; } while (reclaim_idx != head && npkts < budget); } q->cons_cnt += ndesc; return npkts; } bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf) { unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len; const struct skb_shared_info *si = NULL; unsigned int lens[MAX_SKB_FRAGS + 1]; dma_addr_t dma[MAX_SKB_FRAGS + 1]; struct fun_eth_tx_req *req; if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES) fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH); if (unlikely(xdp_frame_has_frags(xdpf))) { si = xdp_get_shared_info_from_frame(xdpf); tot_len = xdp_get_frame_len(xdpf); nfrags += si->nr_frags; ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags * sizeof(struct fun_dataop_gl)), FUNETH_SQE_SIZE); } if (unlikely(fun_txq_avail(q) < ndesc)) { FUN_QSTAT_INC(q, tx_xdp_full); return false; } if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma, lens))) { FUN_QSTAT_INC(q, tx_map_err); return false; } idx = q->prod_cnt & q->mask; req = fun_tx_desc_addr(q, idx); req->op = FUN_ETH_OP_TX; req->len8 = 0; req->flags = 0; req->suboff8 = offsetof(struct fun_eth_tx_req, dataop); req->repr_idn = 0; req->encap_proto = 0; fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0); req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len); fun_write_gl(q, req, dma, lens, nfrags); q->info[idx].xdpf = xdpf; u64_stats_update_begin(&q->syncp); q->stats.tx_bytes += tot_len; q->stats.tx_pkts++; u64_stats_update_end(&q->syncp); trace_funeth_tx(q, tot_len, idx, nfrags); q->prod_cnt += ndesc; return true; } int fun_xdp_xmit_frames(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct funeth_priv *fp = netdev_priv(dev); struct funeth_txq *q, **xdpqs; int i, q_idx; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; xdpqs = rcu_dereference_bh(fp->xdpqs); if (unlikely(!xdpqs)) return -ENETDOWN; q_idx = smp_processor_id(); if (unlikely(q_idx >= fp->num_xdpqs)) return -ENXIO; for (q = xdpqs[q_idx], i = 0; i < n; i++) if (!fun_xdp_tx(q, frames[i])) break; if (unlikely(flags & XDP_XMIT_FLUSH)) fun_txq_wr_db(q); return i; } /* Purge a Tx queue of any queued packets. Should be called once HW access * to the packets has been revoked, e.g., after the queue has been disabled. 
*/ static void fun_txq_purge(struct funeth_txq *q) { while (q->cons_cnt != q->prod_cnt) { unsigned int idx = q->cons_cnt & q->mask; q->cons_cnt += fun_unmap_pkt(q, idx); dev_kfree_skb_any(q->info[idx].skb); } netdev_tx_reset_queue(q->ndq); } static void fun_xdpq_purge(struct funeth_txq *q) { while (q->cons_cnt != q->prod_cnt) { unsigned int idx = q->cons_cnt & q->mask; q->cons_cnt += fun_unmap_pkt(q, idx); xdp_return_frame(q->info[idx].xdpf); } } /* Create a Tx queue, allocating all the host resources needed. */ static struct funeth_txq *fun_txq_create_sw(struct net_device *dev, unsigned int qidx, unsigned int ndesc, struct fun_irq *irq) { struct funeth_priv *fp = netdev_priv(dev); struct funeth_txq *q; int numa_node; if (irq) numa_node = fun_irq_node(irq); /* skb Tx queue */ else numa_node = cpu_to_node(qidx); /* XDP Tx queue */ q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node); if (!q) goto err; q->dma_dev = &fp->pdev->dev; q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE, sizeof(*q->info), true, numa_node, &q->dma_addr, (void **)&q->info, &q->hw_wb); if (!q->desc) goto free_q; q->netdev = dev; q->mask = ndesc - 1; q->qidx = qidx; q->numa_node = numa_node; u64_stats_init(&q->syncp); q->init_state = FUN_QSTATE_INIT_SW; return q; free_q: kfree(q); err: netdev_err(dev, "Can't allocate memory for %s queue %u\n", irq ? "Tx" : "XDP", qidx); return NULL; } static void fun_txq_free_sw(struct funeth_txq *q) { struct funeth_priv *fp = netdev_priv(q->netdev); fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true, q->desc, q->dma_addr, q->info); fp->tx_packets += q->stats.tx_pkts; fp->tx_bytes += q->stats.tx_bytes; fp->tx_dropped += q->stats.tx_map_err; kfree(q); } /* Allocate the device portion of a Tx queue. */ int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq) { struct funeth_priv *fp = netdev_priv(q->netdev); unsigned int irq_idx, ndesc = q->mask + 1; int err; q->irq = irq; *q->hw_wb = 0; q->prod_cnt = 0; q->cons_cnt = 0; irq_idx = irq ? irq->irq_idx : 0; err = fun_sq_create(fp->fdev, FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS | FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0, FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc, q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec, irq_idx, 0, fp->fdev->kern_end_qid, 0, &q->hw_qid, &q->db); if (err) goto out; err = fun_create_and_bind_tx(fp, q->hw_qid); if (err < 0) goto free_devq; q->ethid = err; if (irq) { irq->txq = q; q->ndq = netdev_get_tx_queue(q->netdev, q->qidx); q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count); writel(q->irq_db_val, q->db); } q->init_state = FUN_QSTATE_INIT_FULL; netif_info(fp, ifup, q->netdev, "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n", irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx, q->ethid, q->numa_node); return 0; free_devq: fun_destroy_sq(fp->fdev, q->hw_qid); out: netdev_err(q->netdev, "Failed to create %s queue %u on device, error %d\n", irq ? "Tx" : "XDP", q->qidx, err); return err; } static void fun_txq_free_dev(struct funeth_txq *q) { struct funeth_priv *fp = netdev_priv(q->netdev); if (q->init_state < FUN_QSTATE_INIT_FULL) return; netif_info(fp, ifdown, q->netdev, "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n", q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid, q->irq ? 
q->irq->irq_idx : 0, q->ethid); fun_destroy_sq(fp->fdev, q->hw_qid); fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid); if (q->irq) { q->irq->txq = NULL; fun_txq_purge(q); } else { fun_xdpq_purge(q); } q->init_state = FUN_QSTATE_INIT_SW; } /* Create or advance a Tx queue, allocating all the host and device resources * needed to reach the target state. */ int funeth_txq_create(struct net_device *dev, unsigned int qidx, unsigned int ndesc, struct fun_irq *irq, int state, struct funeth_txq **qp) { struct funeth_txq *q = *qp; int err; if (!q) q = fun_txq_create_sw(dev, qidx, ndesc, irq); if (!q) return -ENOMEM; if (q->init_state >= state) goto out; err = fun_txq_create_dev(q, irq); if (err) { if (!*qp) fun_txq_free_sw(q); return err; } out: *qp = q; return 0; } /* Free Tx queue resources until it reaches the target state. * The queue must be already disconnected from the stack. */ struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state) { if (state < FUN_QSTATE_INIT_FULL) fun_txq_free_dev(q); if (state == FUN_QSTATE_DESTROYED) { fun_txq_free_sw(q); q = NULL; } return q; }
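/*
 * Illustrative sketch appended by the editor; not part of the upstream driver.
 * The Tx path above relies on free-running prod_cnt/cons_cnt counters and a
 * power-of-two ring size: the slot index is (counter & mask) and one slot is
 * always left unused because head == tail means "empty" to the hardware.
 * The toy structure and helpers below (hypothetical names) restate that math.
 */
struct toy_txring {
	unsigned int prod_cnt;	/* total descriptors ever produced */
	unsigned int cons_cnt;	/* total descriptors ever reclaimed */
	unsigned int mask;	/* ring size - 1, ring size is a power of 2 */
};

static unsigned int toy_txring_slot(const struct toy_txring *r)
{
	return r->prod_cnt & r->mask;	/* next descriptor slot to write */
}

static unsigned int toy_txring_avail(const struct toy_txring *r)
{
	/* same expression as fun_txq_avail(): capacity minus in-flight */
	return r->mask - (r->prod_cnt - r->cons_cnt);
}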
linux-master
drivers/net/ethernet/fungible/funeth/funeth_tx.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) #include "funeth.h" #include "funeth_ktls.h" static int fun_admin_ktls_create(struct funeth_priv *fp, unsigned int id) { struct fun_admin_ktls_create_req req = { .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS, sizeof(req)), .subop = FUN_ADMIN_SUBOP_CREATE, .id = cpu_to_be32(id), }; return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); } static int fun_ktls_add(struct net_device *netdev, struct sock *sk, enum tls_offload_ctx_dir direction, struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn) { struct funeth_priv *fp = netdev_priv(netdev); struct fun_admin_ktls_modify_req req = { .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS, sizeof(req)), .subop = FUN_ADMIN_SUBOP_MODIFY, .id = cpu_to_be32(fp->ktls_id), .tcp_seq = cpu_to_be32(start_offload_tcp_sn), }; struct fun_admin_ktls_modify_rsp rsp; struct fun_ktls_tx_ctx *tx_ctx; int rc; if (direction != TLS_OFFLOAD_CTX_DIR_TX) return -EOPNOTSUPP; if (crypto_info->version == TLS_1_2_VERSION) req.version = FUN_KTLS_TLSV2; else return -EOPNOTSUPP; switch (crypto_info->cipher_type) { case TLS_CIPHER_AES_GCM_128: { struct tls12_crypto_info_aes_gcm_128 *c = (void *)crypto_info; req.cipher = FUN_KTLS_CIPHER_AES_GCM_128; memcpy(req.key, c->key, sizeof(c->key)); memcpy(req.iv, c->iv, sizeof(c->iv)); memcpy(req.salt, c->salt, sizeof(c->salt)); memcpy(req.record_seq, c->rec_seq, sizeof(c->rec_seq)); break; } default: return -EOPNOTSUPP; } rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, &rsp, sizeof(rsp), 0); memzero_explicit(&req, sizeof(req)); if (rc) return rc; tx_ctx = tls_driver_ctx(sk, direction); tx_ctx->tlsid = rsp.tlsid; tx_ctx->next_seq = start_offload_tcp_sn; atomic64_inc(&fp->tx_tls_add); return 0; } static void fun_ktls_del(struct net_device *netdev, struct tls_context *tls_ctx, enum tls_offload_ctx_dir direction) { struct funeth_priv *fp = netdev_priv(netdev); struct fun_admin_ktls_modify_req req; struct fun_ktls_tx_ctx *tx_ctx; if (direction != TLS_OFFLOAD_CTX_DIR_TX) return; tx_ctx = __tls_driver_ctx(tls_ctx, direction); req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS, offsetof(struct fun_admin_ktls_modify_req, tcp_seq)); req.subop = FUN_ADMIN_SUBOP_MODIFY; req.flags = cpu_to_be16(FUN_KTLS_MODIFY_REMOVE); req.id = cpu_to_be32(fp->ktls_id); req.tlsid = tx_ctx->tlsid; fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); atomic64_inc(&fp->tx_tls_del); } static int fun_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn, enum tls_offload_ctx_dir direction) { struct funeth_priv *fp = netdev_priv(netdev); struct fun_admin_ktls_modify_req req; struct fun_ktls_tx_ctx *tx_ctx; int rc; if (direction != TLS_OFFLOAD_CTX_DIR_TX) return -EOPNOTSUPP; tx_ctx = tls_driver_ctx(sk, direction); req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS, offsetof(struct fun_admin_ktls_modify_req, key)); req.subop = FUN_ADMIN_SUBOP_MODIFY; req.flags = 0; req.id = cpu_to_be32(fp->ktls_id); req.tlsid = tx_ctx->tlsid; req.tcp_seq = cpu_to_be32(seq); req.version = 0; req.cipher = 0; memcpy(req.record_seq, rcd_sn, sizeof(req.record_seq)); atomic64_inc(&fp->tx_tls_resync); rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); if (!rc) tx_ctx->next_seq = seq; return rc; } static const struct tlsdev_ops fun_ktls_ops = { .tls_dev_add = fun_ktls_add, .tls_dev_del = fun_ktls_del, .tls_dev_resync = fun_ktls_resync, }; int fun_ktls_init(struct net_device *netdev) { struct funeth_priv *fp = netdev_priv(netdev); int rc; rc 
= fun_admin_ktls_create(fp, netdev->dev_port); if (rc) return rc; fp->ktls_id = netdev->dev_port; netdev->tlsdev_ops = &fun_ktls_ops; netdev->hw_features |= NETIF_F_HW_TLS_TX; netdev->features |= NETIF_F_HW_TLS_TX; return 0; } void fun_ktls_cleanup(struct funeth_priv *fp) { if (fp->ktls_id == FUN_HCI_ID_INVALID) return; fun_res_destroy(fp->fdev, FUN_ADMIN_OP_KTLS, 0, fp->ktls_id); fp->ktls_id = FUN_HCI_ID_INVALID; }
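/*
 * Illustrative sketch appended by the editor; not part of the upstream driver.
 * fun_ktls_del() and fun_ktls_resync() above send only a prefix of
 * struct fun_admin_ktls_modify_req by passing offsetof() of the first unused
 * field as the command length, so key material is never copied for operations
 * that do not need it.  The hypothetical struct below shows the same idea.
 */
#include <stddef.h>

struct toy_ktls_modify_req {
	unsigned int id;
	unsigned int tcp_seq;
	unsigned char key[32];		/* omitted for remove/resync */
};

/* Bytes to send when the key is not required: everything before 'key'. */
#define TOY_KTLS_REMOVE_LEN offsetof(struct toy_ktls_modify_req, key)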
linux-master
drivers/net/ethernet/fungible/funeth/funeth_ktls.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include "funeth.h"
#include "funeth_devlink.h"

static const struct devlink_ops fun_dl_ops = {
};

struct devlink *fun_devlink_alloc(struct device *dev)
{
	return devlink_alloc(&fun_dl_ops, sizeof(struct fun_ethdev), dev);
}

void fun_devlink_free(struct devlink *devlink)
{
	devlink_free(devlink);
}

void fun_devlink_register(struct devlink *devlink)
{
	devlink_register(devlink);
}

void fun_devlink_unregister(struct devlink *devlink)
{
	devlink_unregister(devlink);
}
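/*
 * Illustrative sketch appended by the editor; not part of the upstream driver.
 * fun_devlink_alloc() above sizes the devlink private area for a
 * struct fun_ethdev, so the driver state lives inside the devlink object and
 * is reached with devlink_priv().  The standalone toy below (hypothetical
 * names, plain calloc() instead of devlink_alloc()) shows the same
 * header-plus-private-area layout.
 */
#include <stdlib.h>

struct toy_devlink {
	int registered;
	/* private area of the requested size follows the header */
	unsigned char priv[];
};

static struct toy_devlink *toy_devlink_alloc(size_t priv_size)
{
	return calloc(1, sizeof(struct toy_devlink) + priv_size);
}

static void *toy_devlink_priv(struct toy_devlink *dl)
{
	return dl->priv;
}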
linux-master
drivers/net/ethernet/fungible/funeth/funeth_devlink.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) #include <linux/bpf_trace.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/filter.h> #include <linux/irq.h> #include <linux/pci.h> #include <linux/skbuff.h> #include "funeth_txrx.h" #include "funeth.h" #include "fun_queue.h" #define CREATE_TRACE_POINTS #include "funeth_trace.h" /* Given the device's max supported MTU and pages of at least 4KB a packet can * be scattered into at most 4 buffers. */ #define RX_MAX_FRAGS 4 /* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */ #define FUN_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) /* We try to reuse pages for our buffers. To avoid frequent page ref writes we * take EXTRA_PAGE_REFS references at once and then hand them out one per packet * occupying the buffer. */ #define EXTRA_PAGE_REFS 1000000 #define MIN_PAGE_REFS 1000 enum { FUN_XDP_FLUSH_REDIR = 1, FUN_XDP_FLUSH_TX = 2, }; /* See if a page is running low on refs we are holding and if so take more. */ static void refresh_refs(struct funeth_rxbuf *buf) { if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) { buf->pg_refs += EXTRA_PAGE_REFS; page_ref_add(buf->page, EXTRA_PAGE_REFS); } } /* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its * page is worth retaining and there's room for it. Otherwise the page is * unmapped and our references released. */ static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf) { struct funeth_rx_cache *c = &q->cache; if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) { c->bufs[c->prod_cnt & c->mask] = *buf; c->prod_cnt++; } else { dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); __page_frag_cache_drain(buf->page, buf->pg_refs); } } /* Get a page from the Rx buffer cache. We only consider the next available * page and return it if we own all its references. */ static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb) { struct funeth_rx_cache *c = &q->cache; struct funeth_rxbuf *buf; if (c->prod_cnt == c->cons_cnt) return false; /* empty cache */ buf = &c->bufs[c->cons_cnt & c->mask]; if (page_ref_count(buf->page) == buf->pg_refs) { dma_sync_single_for_device(q->dma_dev, buf->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); *rb = *buf; buf->page = NULL; refresh_refs(rb); c->cons_cnt++; return true; } /* Page can't be reused. If the cache is full drop this page. */ if (c->prod_cnt - c->cons_cnt > c->mask) { dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); __page_frag_cache_drain(buf->page, buf->pg_refs); buf->page = NULL; c->cons_cnt++; } return false; } /* Allocate and DMA-map a page for receive. */ static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb, int node, gfp_t gfp) { struct page *p; if (cache_get(q, rb)) return 0; p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0); if (unlikely(!p)) return -ENOMEM; rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) { FUN_QSTAT_INC(q, rx_map_err); __free_page(p); return -ENOMEM; } FUN_QSTAT_INC(q, rx_page_alloc); rb->page = p; rb->pg_refs = 1; refresh_refs(rb); rb->node = page_is_pfmemalloc(p) ? 
-1 : page_to_nid(p); return 0; } static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb) { if (rb->page) { dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); __page_frag_cache_drain(rb->page, rb->pg_refs); rb->page = NULL; } } /* Run the XDP program assigned to an Rx queue. * Return %NULL if the buffer is consumed, or the virtual address of the packet * to turn into an skb. */ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va, int ref_ok, struct funeth_txq *xdp_q) { struct bpf_prog *xdp_prog; struct xdp_frame *xdpf; struct xdp_buff xdp; u32 act; /* VA includes the headroom, frag size includes headroom + tailroom */ xdp_init_buff(&xdp, ALIGN(skb_frag_size(frags), FUN_EPRQ_PKT_ALIGN), &q->xdp_rxq); xdp_prepare_buff(&xdp, buf_va, FUN_XDP_HEADROOM, skb_frag_size(frags) - (FUN_RX_TAILROOM + FUN_XDP_HEADROOM), false); xdp_prog = READ_ONCE(q->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, &xdp); switch (act) { case XDP_PASS: /* remove headroom, which may not be FUN_XDP_HEADROOM now */ skb_frag_size_set(frags, xdp.data_end - xdp.data); skb_frag_off_add(frags, xdp.data - xdp.data_hard_start); goto pass; case XDP_TX: if (unlikely(!ref_ok)) goto pass; xdpf = xdp_convert_buff_to_frame(&xdp); if (!xdpf || !fun_xdp_tx(xdp_q, xdpf)) goto xdp_error; FUN_QSTAT_INC(q, xdp_tx); q->xdp_flush |= FUN_XDP_FLUSH_TX; break; case XDP_REDIRECT: if (unlikely(!ref_ok)) goto pass; if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog))) goto xdp_error; FUN_QSTAT_INC(q, xdp_redir); q->xdp_flush |= FUN_XDP_FLUSH_REDIR; break; default: bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(q->netdev, xdp_prog, act); xdp_error: q->cur_buf->pg_refs++; /* return frags' page reference */ FUN_QSTAT_INC(q, xdp_err); break; case XDP_DROP: q->cur_buf->pg_refs++; FUN_QSTAT_INC(q, xdp_drops); break; } return NULL; pass: return xdp.data; } /* A CQE contains a fixed completion structure along with optional metadata and * even packet data. Given the start address of a CQE return the start of the * contained fixed structure, which lies at the end. */ static const void *cqe_to_info(const void *cqe) { return cqe + FUNETH_CQE_INFO_OFFSET; } /* The inverse of cqe_to_info(). */ static const void *info_to_cqe(const void *cqe_info) { return cqe_info - FUNETH_CQE_INFO_OFFSET; } /* Return the type of hash provided by the device based on the L3 and L4 * protocols it parsed for the packet. */ static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse) { static const enum pkt_hash_types htype_map[] = { PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3, PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L4, PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3, PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3 }; u16 key; /* Build the key from the TCP/UDP and IP/IPv6 bits */ key = ((pkt_parse >> FUN_ETH_RX_CV_OL4_PROT_S) & 6) | ((pkt_parse >> (FUN_ETH_RX_CV_OL3_PROT_S + 1)) & 1); return htype_map[key]; } /* Each received packet can be scattered across several Rx buffers or can * share a buffer with previously received packets depending on the buffer * and packet sizes and the room available in the most recently used buffer. * * The rules are: * - If the buffer at the head of an RQ has not been used it gets (part of) the * next incoming packet. * - Otherwise, if the packet fully fits in the buffer's remaining space the * packet is written there. * - Otherwise, the packet goes into the next Rx buffer. 
* * This function returns the Rx buffer for a packet or fragment thereof of the * given length. If it isn't @buf it either recycles or frees that buffer * before advancing the queue to the next buffer. * * If called repeatedly with the remaining length of a packet it will walk * through all the buffers containing the packet. */ static struct funeth_rxbuf * get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len) { if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset) return buf; /* @buf holds (part of) the packet */ /* The packet occupies part of the next buffer. Move there after * replenishing the current buffer slot either with the spare page or * by reusing the slot's existing page. Note that if a spare page isn't * available and the current packet occupies @buf it is a multi-frag * packet that will be dropped leaving @buf available for reuse. */ if ((page_ref_count(buf->page) == buf->pg_refs && buf->node == numa_mem_id()) || !q->spare_buf.page) { dma_sync_single_for_device(q->dma_dev, buf->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); refresh_refs(buf); } else { cache_offer(q, buf); *buf = q->spare_buf; q->spare_buf.page = NULL; q->rqes[q->rq_cons & q->rq_mask] = FUN_EPRQ_RQBUF_INIT(buf->dma_addr); } q->buf_offset = 0; q->rq_cons++; return &q->bufs[q->rq_cons & q->rq_mask]; } /* Gather the page fragments making up the first Rx packet on @q. Its total * length @tot_len includes optional head- and tail-rooms. * * Return 0 if the device retains ownership of at least some of the pages. * In this case the caller may only copy the packet. * * A non-zero return value gives the caller permission to use references to the * pages, e.g., attach them to skbs. Additionally, if the value is <0 at least * one of the pages is PF_MEMALLOC. * * Regardless of outcome the caller is granted a reference to each of the pages. */ static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len, skb_frag_t *frags) { struct funeth_rxbuf *buf = q->cur_buf; unsigned int frag_len; int ref_ok = 1; for (;;) { buf = get_buf(q, buf, tot_len); /* We always keep the RQ full of buffers so before we can give * one of our pages to the stack we require that we can obtain * a replacement page. If we can't the packet will either be * copied or dropped so we can retain ownership of the page and * reuse it. */ if (!q->spare_buf.page && funeth_alloc_page(q, &q->spare_buf, numa_mem_id(), GFP_ATOMIC | __GFP_MEMALLOC)) ref_ok = 0; frag_len = min_t(unsigned int, tot_len, PAGE_SIZE - q->buf_offset); dma_sync_single_for_cpu(q->dma_dev, buf->dma_addr + q->buf_offset, frag_len, DMA_FROM_DEVICE); buf->pg_refs--; if (ref_ok) ref_ok |= buf->node; skb_frag_fill_page_desc(frags++, buf->page, q->buf_offset, frag_len); tot_len -= frag_len; if (!tot_len) break; q->buf_offset = PAGE_SIZE; } q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN); q->cur_buf = buf; return ref_ok; } static bool rx_hwtstamp_enabled(const struct net_device *dev) { const struct funeth_priv *d = netdev_priv(dev); return d->hwtstamp_cfg.rx_filter == HWTSTAMP_FILTER_ALL; } /* Advance the CQ pointers and phase tag to the next CQE. */ static void advance_cq(struct funeth_rxq *q) { if (unlikely(q->cq_head == q->cq_mask)) { q->cq_head = 0; q->phase ^= 1; q->next_cqe_info = cqe_to_info(q->cqes); } else { q->cq_head++; q->next_cqe_info += FUNETH_CQE_SIZE; } prefetch(q->next_cqe_info); } /* Process the packet represented by the head CQE of @q. 
Gather the packet's * fragments, run it through the optional XDP program, and if needed construct * an skb and pass it to the stack. */ static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q) { const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info); unsigned int i, tot_len, pkt_len = be32_to_cpu(rxreq->pkt_len); struct net_device *ndev = q->netdev; skb_frag_t frags[RX_MAX_FRAGS]; struct skb_shared_info *si; unsigned int headroom; gro_result_t gro_res; struct sk_buff *skb; int ref_ok; void *va; u16 cv; u64_stats_update_begin(&q->syncp); q->stats.rx_pkts++; q->stats.rx_bytes += pkt_len; u64_stats_update_end(&q->syncp); advance_cq(q); /* account for head- and tail-room, present only for 1-buffer packets */ tot_len = pkt_len; headroom = be16_to_cpu(rxreq->headroom); if (likely(headroom)) tot_len += FUN_RX_TAILROOM + headroom; ref_ok = fun_gather_pkt(q, tot_len, frags); va = skb_frag_address(frags); if (xdp_q && headroom == FUN_XDP_HEADROOM) { va = fun_run_xdp(q, frags, va, ref_ok, xdp_q); if (!va) return; headroom = 0; /* XDP_PASS trims it */ } if (unlikely(!ref_ok)) goto no_mem; if (likely(headroom)) { /* headroom is either FUN_RX_HEADROOM or FUN_XDP_HEADROOM */ prefetch(va + headroom); skb = napi_build_skb(va, ALIGN(tot_len, FUN_EPRQ_PKT_ALIGN)); if (unlikely(!skb)) goto no_mem; skb_reserve(skb, headroom); __skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); } else { prefetch(va); skb = napi_get_frags(q->napi); if (unlikely(!skb)) goto no_mem; if (ref_ok < 0) skb->pfmemalloc = 1; si = skb_shinfo(skb); si->nr_frags = rxreq->nsgl; for (i = 0; i < si->nr_frags; i++) si->frags[i] = frags[i]; skb->len = pkt_len; skb->data_len = pkt_len; skb->truesize += round_up(pkt_len, FUN_EPRQ_PKT_ALIGN); } skb_record_rx_queue(skb, q->qidx); cv = be16_to_cpu(rxreq->pkt_cv); if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash)) skb_set_hash(skb, be32_to_cpu(rxreq->hash), cqe_to_pkt_hash_type(cv)); if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) { FUN_QSTAT_INC(q, rx_cso); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = be16_to_cpu(rxreq->csum) - 1; } if (unlikely(rx_hwtstamp_enabled(q->netdev))) skb_hwtstamps(skb)->hwtstamp = be64_to_cpu(rxreq->timestamp); trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv); gro_res = skb->data_len ? napi_gro_frags(q->napi) : napi_gro_receive(q->napi, skb); if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE) FUN_QSTAT_INC(q, gro_merged); else if (gro_res == GRO_HELD) FUN_QSTAT_INC(q, gro_pkts); return; no_mem: FUN_QSTAT_INC(q, rx_mem_drops); /* Release the references we've been granted for the frag pages. * We return the ref of the last frag and free the rest. */ q->cur_buf->pg_refs++; for (i = 0; i < rxreq->nsgl - 1; i++) __free_page(skb_frag_page(frags + i)); } /* Return 0 if the phase tag of the CQE at the CQ's head matches expectations * indicating the CQE is new. */ static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase) { u16 sf_p = be16_to_cpu(ci->sf_p); return (sf_p & 1) ^ phase; } /* Walk through a CQ identifying and processing fresh CQEs up to the given * budget. Return the remaining budget. 
*/ static int fun_process_cqes(struct funeth_rxq *q, int budget) { struct funeth_priv *fp = netdev_priv(q->netdev); struct funeth_txq **xdpqs, *xdp_q = NULL; xdpqs = rcu_dereference_bh(fp->xdpqs); if (xdpqs) xdp_q = xdpqs[smp_processor_id()]; while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) { /* access other descriptor fields after the phase check */ dma_rmb(); fun_handle_cqe_pkt(q, xdp_q); budget--; } if (unlikely(q->xdp_flush)) { if (q->xdp_flush & FUN_XDP_FLUSH_TX) fun_txq_wr_db(xdp_q); if (q->xdp_flush & FUN_XDP_FLUSH_REDIR) xdp_do_flush(); q->xdp_flush = 0; } return budget; } /* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ * doorbells as needed. */ int fun_rxq_napi_poll(struct napi_struct *napi, int budget) { struct fun_irq *irq = container_of(napi, struct fun_irq, napi); struct funeth_rxq *q = irq->rxq; int work_done = budget - fun_process_cqes(q, budget); u32 cq_db_val = q->cq_head; if (unlikely(work_done >= budget)) FUN_QSTAT_INC(q, rx_budget); else if (napi_complete_done(napi, work_done)) cq_db_val |= q->irq_db_val; /* check whether to post new Rx buffers */ if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) { u64_stats_update_begin(&q->syncp); q->stats.rx_bufs += q->rq_cons - q->rq_cons_db; u64_stats_update_end(&q->syncp); q->rq_cons_db = q->rq_cons; writel((q->rq_cons - 1) & q->rq_mask, q->rq_db); } writel(cq_db_val, q->cq_db); return work_done; } /* Free the Rx buffers of an Rx queue. */ static void fun_rxq_free_bufs(struct funeth_rxq *q) { struct funeth_rxbuf *b = q->bufs; unsigned int i; for (i = 0; i <= q->rq_mask; i++, b++) funeth_free_page(q, b); funeth_free_page(q, &q->spare_buf); q->cur_buf = NULL; } /* Initially provision an Rx queue with Rx buffers. */ static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node) { struct funeth_rxbuf *b = q->bufs; unsigned int i; for (i = 0; i <= q->rq_mask; i++, b++) { if (funeth_alloc_page(q, b, node, GFP_KERNEL)) { fun_rxq_free_bufs(q); return -ENOMEM; } q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr); } q->cur_buf = q->bufs; return 0; } /* Initialize a used-buffer cache of the given depth. */ static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth, int node) { c->mask = depth - 1; c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node); return c->bufs ? 0 : -ENOMEM; } /* Deallocate an Rx queue's used-buffer cache and its contents. */ static void fun_rxq_free_cache(struct funeth_rxq *q) { struct funeth_rxbuf *b = q->cache.bufs; unsigned int i; for (i = 0; i <= q->cache.mask; i++, b++) funeth_free_page(q, b); kvfree(q->cache.bufs); q->cache.bufs = NULL; } int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog) { struct funeth_priv *fp = netdev_priv(q->netdev); struct fun_admin_epcq_req cmd; u16 headroom; int err; headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM; if (headroom != q->headroom) { cmd.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ, sizeof(cmd)); cmd.u.modify = FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(FUN_ADMIN_SUBOP_MODIFY, 0, q->hw_cqid, headroom); err = fun_submit_admin_sync_cmd(fp->fdev, &cmd.common, NULL, 0, 0); if (err) return err; q->headroom = headroom; } WRITE_ONCE(q->xdp_prog, prog); return 0; } /* Create an Rx queue, allocating the host memory it needs. 
*/ static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev, unsigned int qidx, unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq) { struct funeth_priv *fp = netdev_priv(dev); struct funeth_rxq *q; int err = -ENOMEM; int numa_node; numa_node = fun_irq_node(irq); q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node); if (!q) goto err; q->qidx = qidx; q->netdev = dev; q->cq_mask = ncqe - 1; q->rq_mask = nrqe - 1; q->numa_node = numa_node; q->rq_db_thres = nrqe / 4; u64_stats_init(&q->syncp); q->dma_dev = &fp->pdev->dev; q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), sizeof(*q->bufs), false, numa_node, &q->rq_dma_addr, (void **)&q->bufs, NULL); if (!q->rqes) goto free_q; q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0, false, numa_node, &q->cq_dma_addr, NULL, NULL); if (!q->cqes) goto free_rqes; err = fun_rxq_init_cache(&q->cache, nrqe, numa_node); if (err) goto free_cqes; err = fun_rxq_alloc_bufs(q, numa_node); if (err) goto free_cache; q->stats.rx_bufs = q->rq_mask; q->init_state = FUN_QSTATE_INIT_SW; return q; free_cache: fun_rxq_free_cache(q); free_cqes: dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes, q->cq_dma_addr); free_rqes: fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes, q->rq_dma_addr, q->bufs); free_q: kfree(q); err: netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx); return ERR_PTR(err); } static void fun_rxq_free_sw(struct funeth_rxq *q) { struct funeth_priv *fp = netdev_priv(q->netdev); fun_rxq_free_cache(q); fun_rxq_free_bufs(q); fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false, q->rqes, q->rq_dma_addr, q->bufs); dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE, q->cqes, q->cq_dma_addr); /* Before freeing the queue transfer key counters to the device. */ fp->rx_packets += q->stats.rx_pkts; fp->rx_bytes += q->stats.rx_bytes; fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops; kfree(q); } /* Create an Rx queue's resources on the device. */ int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq) { struct funeth_priv *fp = netdev_priv(q->netdev); unsigned int ncqe = q->cq_mask + 1; unsigned int nrqe = q->rq_mask + 1; int err; err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx, irq->napi.napi_id); if (err) goto out; err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (err) goto xdp_unreg; q->phase = 1; q->irq_cnt = 0; q->cq_head = 0; q->rq_cons = 0; q->rq_cons_db = 0; q->buf_offset = 0; q->napi = &irq->napi; q->irq_db_val = fp->cq_irq_db; q->next_cqe_info = cqe_to_info(q->cqes); q->xdp_prog = fp->xdp_prog; q->headroom = fp->xdp_prog ? 
FUN_XDP_HEADROOM : FUN_RX_HEADROOM; err = fun_sq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ, 0, FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0, 0, 0, fp->fdev->kern_end_qid, PAGE_SHIFT, &q->hw_sqid, &q->rq_db); if (err) goto xdp_unreg; err = fun_cq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR | FUN_ADMIN_EPCQ_CREATE_FLAG_RQ, 0, q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe, q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0, irq->irq_idx, 0, fp->fdev->kern_end_qid, &q->hw_cqid, &q->cq_db); if (err) goto free_rq; irq->rxq = q; writel(q->rq_mask, q->rq_db); q->init_state = FUN_QSTATE_INIT_FULL; netif_info(fp, ifup, q->netdev, "Rx queue %u, depth %u/%u, HW qid %u/%u, IRQ idx %u, node %d, headroom %u\n", q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx, q->numa_node, q->headroom); return 0; free_rq: fun_destroy_sq(fp->fdev, q->hw_sqid); xdp_unreg: xdp_rxq_info_unreg(&q->xdp_rxq); out: netdev_err(q->netdev, "Failed to create Rx queue %u on device, error %d\n", q->qidx, err); return err; } static void fun_rxq_free_dev(struct funeth_rxq *q) { struct funeth_priv *fp = netdev_priv(q->netdev); struct fun_irq *irq; if (q->init_state < FUN_QSTATE_INIT_FULL) return; irq = container_of(q->napi, struct fun_irq, napi); netif_info(fp, ifdown, q->netdev, "Freeing Rx queue %u (id %u/%u), IRQ %u\n", q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx); irq->rxq = NULL; xdp_rxq_info_unreg(&q->xdp_rxq); fun_destroy_sq(fp->fdev, q->hw_sqid); fun_destroy_cq(fp->fdev, q->hw_cqid); q->init_state = FUN_QSTATE_INIT_SW; } /* Create or advance an Rx queue, allocating all the host and device resources * needed to reach the target state. */ int funeth_rxq_create(struct net_device *dev, unsigned int qidx, unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq, int state, struct funeth_rxq **qp) { struct funeth_rxq *q = *qp; int err; if (!q) { q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq); if (IS_ERR(q)) return PTR_ERR(q); } if (q->init_state >= state) goto out; err = fun_rxq_create_dev(q, irq); if (err) { if (!*qp) fun_rxq_free_sw(q); return err; } out: *qp = q; return 0; } /* Free Rx queue resources until it reaches the target state. */ struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state) { if (state < FUN_QSTATE_INIT_FULL) fun_rxq_free_dev(q); if (state == FUN_QSTATE_DESTROYED) { fun_rxq_free_sw(q); q = NULL; } return q; }
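/*
 * Illustrative sketch appended by the editor; not part of the upstream driver.
 * The Rx path above detects new CQEs with a phase (generation) bit: the
 * queue-local phase flips every time the head wraps, and a CQE is fresh only
 * while its phase bit matches the queue's expected value.  The hypothetical
 * helpers below restate that rule for a toy completion queue.
 */
struct toy_cq {
	unsigned int head;
	unsigned int mask;	/* number of CQEs - 1, power of 2 */
	unsigned int phase;	/* expected phase bit, starts at 1 */
};

static int toy_cqe_is_fresh(const struct toy_cq *q, unsigned int cqe_phase_bit)
{
	return (cqe_phase_bit & 1) == q->phase;
}

static void toy_cq_advance(struct toy_cq *q)
{
	if (q->head == q->mask) {
		q->head = 0;
		q->phase ^= 1;	/* wrapped: expect the opposite bit now */
	} else {
		q->head++;
	}
}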
linux-master
drivers/net/ethernet/fungible/funeth/funeth_rx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* Applied Micro X-Gene SoC Ethernet Driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Authors: Iyappan Subramanian <[email protected]> * Ravi Patel <[email protected]> * Keyur Chudgar <[email protected]> */ #include <linux/gpio.h> #include "xgene_enet_main.h" #include "xgene_enet_hw.h" #include "xgene_enet_sgmac.h" #include "xgene_enet_xgmac.h" #define RES_ENET_CSR 0 #define RES_RING_CSR 1 #define RES_RING_CMD 2 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) { struct xgene_enet_raw_desc16 *raw_desc; int i; if (!buf_pool) return; for (i = 0; i < buf_pool->slots; i++) { raw_desc = &buf_pool->raw_desc16[i]; /* Hardware expects descriptor in little endian format */ raw_desc->m0 = cpu_to_le64(i | SET_VAL(FPQNUM, buf_pool->dst_ring_num) | SET_VAL(STASH, 3)); } } static u16 xgene_enet_get_data_len(u64 bufdatalen) { u16 hw_len, mask; hw_len = GET_VAL(BUFDATALEN, bufdatalen); if (unlikely(hw_len == 0x7800)) { return 0; } else if (!(hw_len & BIT(14))) { mask = GENMASK(13, 0); return (hw_len & mask) ? (hw_len & mask) : SIZE_16K; } else if (!(hw_len & GENMASK(13, 12))) { mask = GENMASK(11, 0); return (hw_len & mask) ? (hw_len & mask) : SIZE_4K; } else { mask = GENMASK(11, 0); return (hw_len & mask) ? (hw_len & mask) : SIZE_2K; } } static u16 xgene_enet_set_data_len(u32 size) { u16 hw_len; hw_len = (size == SIZE_4K) ? BIT(14) : 0; return hw_len; } static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool, u32 nbuf) { struct xgene_enet_raw_desc16 *raw_desc; struct xgene_enet_pdata *pdata; struct net_device *ndev; dma_addr_t dma_addr; struct device *dev; struct page *page; u32 slots, tail; u16 hw_len; int i; if (unlikely(!buf_pool)) return 0; ndev = buf_pool->ndev; pdata = netdev_priv(ndev); dev = ndev_to_dev(ndev); slots = buf_pool->slots - 1; tail = buf_pool->tail; for (i = 0; i < nbuf; i++) { raw_desc = &buf_pool->raw_desc16[tail]; page = dev_alloc_page(); if (unlikely(!page)) return -ENOMEM; dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, dma_addr))) { put_page(page); return -ENOMEM; } hw_len = xgene_enet_set_data_len(PAGE_SIZE); raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | SET_VAL(BUFDATALEN, hw_len) | SET_BIT(COHERENT)); buf_pool->frag_page[tail] = page; tail = (tail + 1) & slots; } pdata->ring_ops->wr_cmd(buf_pool, nbuf); buf_pool->tail = tail; return 0; } static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, u32 nbuf) { struct sk_buff *skb; struct xgene_enet_raw_desc16 *raw_desc; struct xgene_enet_pdata *pdata; struct net_device *ndev; struct device *dev; dma_addr_t dma_addr; u32 tail = buf_pool->tail; u32 slots = buf_pool->slots - 1; u16 bufdatalen, len; int i; ndev = buf_pool->ndev; dev = ndev_to_dev(buf_pool->ndev); pdata = netdev_priv(ndev); bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0)); len = XGENE_ENET_STD_MTU; for (i = 0; i < nbuf; i++) { raw_desc = &buf_pool->raw_desc16[tail]; skb = netdev_alloc_skb_ip_align(ndev, len); if (unlikely(!skb)) return -ENOMEM; dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma_addr)) { netdev_err(ndev, "DMA mapping error\n"); dev_kfree_skb_any(skb); return -EINVAL; } buf_pool->rx_skb[tail] = skb; raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | SET_VAL(BUFDATALEN, bufdatalen) | SET_BIT(COHERENT)); tail = (tail + 1) & slots; } pdata->ring_ops->wr_cmd(buf_pool, nbuf); buf_pool->tail = tail; 
return 0; } static u8 xgene_enet_hdr_len(const void *data) { const struct ethhdr *eth = data; return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN; } static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) { struct device *dev = ndev_to_dev(buf_pool->ndev); struct xgene_enet_raw_desc16 *raw_desc; dma_addr_t dma_addr; int i; /* Free up the buffers held by hardware */ for (i = 0; i < buf_pool->slots; i++) { if (buf_pool->rx_skb[i]) { dev_kfree_skb_any(buf_pool->rx_skb[i]); raw_desc = &buf_pool->raw_desc16[i]; dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)); dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE); } } } static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool) { struct device *dev = ndev_to_dev(buf_pool->ndev); dma_addr_t dma_addr; struct page *page; int i; /* Free up the buffers held by hardware */ for (i = 0; i < buf_pool->slots; i++) { page = buf_pool->frag_page[i]; if (page) { dma_addr = buf_pool->frag_dma_addr[i]; dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); put_page(page); } } } static irqreturn_t xgene_enet_rx_irq(const int irq, void *data) { struct xgene_enet_desc_ring *rx_ring = data; if (napi_schedule_prep(&rx_ring->napi)) { disable_irq_nosync(irq); __napi_schedule(&rx_ring->napi); } return IRQ_HANDLED; } static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, struct xgene_enet_raw_desc *raw_desc) { struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev); struct sk_buff *skb; struct device *dev; skb_frag_t *frag; dma_addr_t *frag_dma_addr; u16 skb_index; u8 mss_index; u8 status; int i; skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); skb = cp_ring->cp_skb[skb_index]; frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS]; dev = ndev_to_dev(cp_ring->ndev); dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), skb_headlen(skb), DMA_TO_DEVICE); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag), DMA_TO_DEVICE); } if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) { mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3)); spin_lock(&pdata->mss_lock); pdata->mss_refcnt[mss_index]--; spin_unlock(&pdata->mss_lock); } /* Checking for error */ status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); if (unlikely(status > 2)) { cp_ring->tx_dropped++; cp_ring->tx_errors++; } if (likely(skb)) { dev_kfree_skb_any(skb); } else { netdev_err(cp_ring->ndev, "completion skb is NULL\n"); } return 0; } static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); int mss_index = -EBUSY; int i; spin_lock(&pdata->mss_lock); /* Reuse the slot if MSS matches */ for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) { if (pdata->mss[i] == mss) { pdata->mss_refcnt[i]++; mss_index = i; } } /* Overwrite the slot with ref_count = 0 */ for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) { if (!pdata->mss_refcnt[i]) { pdata->mss_refcnt[i]++; pdata->mac_ops->set_mss(pdata, mss, i); pdata->mss[i] = mss; mss_index = i; } } spin_unlock(&pdata->mss_lock); return mss_index; } static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo) { struct net_device *ndev = skb->dev; struct iphdr *iph; u8 l3hlen = 0, l4hlen = 0; u8 ethhdr, proto = 0, csum_enable = 0; u32 hdr_len, mss = 0; u32 i, len, nr_frags; int mss_index; ethhdr = xgene_enet_hdr_len(skb->data); if (unlikely(skb->protocol != htons(ETH_P_IP)) && 
unlikely(skb->protocol != htons(ETH_P_8021Q))) goto out; if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM))) goto out; iph = ip_hdr(skb); if (unlikely(ip_is_fragment(iph))) goto out; if (likely(iph->protocol == IPPROTO_TCP)) { l4hlen = tcp_hdrlen(skb) >> 2; csum_enable = 1; proto = TSO_IPPROTO_TCP; if (ndev->features & NETIF_F_TSO) { hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb); mss = skb_shinfo(skb)->gso_size; if (skb_is_nonlinear(skb)) { len = skb_headlen(skb); nr_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < 2 && i < nr_frags; i++) len += skb_frag_size( &skb_shinfo(skb)->frags[i]); /* HW requires header must reside in 3 buffer */ if (unlikely(hdr_len > len)) { if (skb_linearize(skb)) return 0; } } if (!mss || ((skb->len - hdr_len) <= mss)) goto out; mss_index = xgene_enet_setup_mss(ndev, mss); if (unlikely(mss_index < 0)) return -EBUSY; *hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index); } } else if (iph->protocol == IPPROTO_UDP) { l4hlen = UDP_HDR_SIZE; csum_enable = 1; } out: l3hlen = ip_hdrlen(skb) >> 2; *hopinfo |= SET_VAL(TCPHDR, l4hlen) | SET_VAL(IPHDR, l3hlen) | SET_VAL(ETHHDR, ethhdr) | SET_VAL(EC, csum_enable) | SET_VAL(IS, proto) | SET_BIT(IC) | SET_BIT(TYPE_ETH_WORK_MESSAGE); return 0; } static u16 xgene_enet_encode_len(u16 len) { return (len == BUFLEN_16K) ? 0 : len; } static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len) { desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) | SET_VAL(BUFDATALEN, len)); } static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring) { __le64 *exp_bufs; exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS]; memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS); ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1); return exp_bufs; } static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring) { return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS]; } static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, struct sk_buff *skb) { struct device *dev = ndev_to_dev(tx_ring->ndev); struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev); struct xgene_enet_raw_desc *raw_desc; __le64 *exp_desc = NULL, *exp_bufs = NULL; dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; skb_frag_t *frag; u16 tail = tx_ring->tail; u64 hopinfo = 0; u32 len, hw_len; u8 ll = 0, nv = 0, idx = 0; bool split = false; u32 size, offset, ell_bytes = 0; u32 i, fidx, nr_frags, count = 1; int ret; raw_desc = &tx_ring->raw_desc[tail]; tail = (tail + 1) & (tx_ring->slots - 1); memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc)); ret = xgene_enet_work_msg(skb, &hopinfo); if (ret) return ret; raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) | hopinfo); len = skb_headlen(skb); hw_len = xgene_enet_encode_len(len); dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { netdev_err(tx_ring->ndev, "DMA mapping error\n"); return -EINVAL; } /* Hardware expects descriptor in little endian format */ raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | SET_VAL(BUFDATALEN, hw_len) | SET_BIT(COHERENT)); if (!skb_is_nonlinear(skb)) goto out; /* scatter gather */ nv = 1; exp_desc = (void *)&tx_ring->raw_desc[tail]; tail = (tail + 1) & (tx_ring->slots - 1); memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc)); nr_frags = skb_shinfo(skb)->nr_frags; for (i = nr_frags; i < 4 ; i++) exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER); frag_dma_addr = xgene_get_frag_dma_array(tx_ring); for (i = 0, fidx = 0; split 
|| (fidx < nr_frags); i++) { if (!split) { frag = &skb_shinfo(skb)->frags[fidx]; size = skb_frag_size(frag); offset = 0; pbuf_addr = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, pbuf_addr)) return -EINVAL; frag_dma_addr[fidx] = pbuf_addr; fidx++; if (size > BUFLEN_16K) split = true; } if (size > BUFLEN_16K) { len = BUFLEN_16K; size -= BUFLEN_16K; } else { len = size; split = false; } dma_addr = pbuf_addr + offset; hw_len = xgene_enet_encode_len(len); switch (i) { case 0: case 1: case 2: xgene_set_addr_len(exp_desc, i, dma_addr, hw_len); break; case 3: if (split || (fidx != nr_frags)) { exp_bufs = xgene_enet_get_exp_bufs(tx_ring); xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len); idx++; ell_bytes += len; } else { xgene_set_addr_len(exp_desc, i, dma_addr, hw_len); } break; default: xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len); idx++; ell_bytes += len; break; } if (split) offset += BUFLEN_16K; } count++; if (idx) { ll = 1; dma_addr = dma_map_single(dev, exp_bufs, sizeof(u64) * MAX_EXP_BUFFS, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { dev_kfree_skb_any(skb); return -EINVAL; } i = ell_bytes >> LL_BYTES_LSB_LEN; exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | SET_VAL(LL_BYTES_MSB, i) | SET_VAL(LL_LEN, idx)); raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes)); } out: raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | SET_VAL(USERINFO, tx_ring->tail)); tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; pdata->tx_level[tx_ring->cp_ring->index] += count; tx_ring->tail = tail; return count; } static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_desc_ring *tx_ring; int index = skb->queue_mapping; u32 tx_level = pdata->tx_level[index]; int count; tx_ring = pdata->tx_ring[index]; if (tx_level < pdata->txc_level[index]) tx_level += ((typeof(pdata->tx_level[index]))~0U); if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) { netif_stop_subqueue(ndev, index); return NETDEV_TX_BUSY; } if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE)) return NETDEV_TX_OK; count = xgene_enet_setup_tx_desc(tx_ring, skb); if (count == -EBUSY) return NETDEV_TX_BUSY; if (count <= 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } skb_tx_timestamp(skb); tx_ring->tx_packets++; tx_ring->tx_bytes += skb->len; pdata->ring_ops->wr_cmd(tx_ring, count); return NETDEV_TX_OK; } static void xgene_enet_rx_csum(struct sk_buff *skb) { struct net_device *ndev = skb->dev; struct iphdr *iph = ip_hdr(skb); if (!(ndev->features & NETIF_F_RXCSUM)) return; if (skb->protocol != htons(ETH_P_IP)) return; if (ip_is_fragment(iph)) return; if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP) return; skb->ip_summed = CHECKSUM_UNNECESSARY; } static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool, struct xgene_enet_raw_desc *raw_desc, struct xgene_enet_raw_desc *exp_desc) { __le64 *desc = (void *)exp_desc; dma_addr_t dma_addr; struct device *dev; struct page *page; u16 slots, head; u32 frag_size; int i; if (!buf_pool || !raw_desc || !exp_desc || (!GET_VAL(NV, le64_to_cpu(raw_desc->m0)))) return; dev = ndev_to_dev(buf_pool->ndev); slots = buf_pool->slots - 1; head = buf_pool->head; for (i = 0; i < 4; i++) { frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1])); if (!frag_size) break; dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1])); dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); page = 
buf_pool->frag_page[head]; put_page(page); buf_pool->frag_page[head] = NULL; head = (head + 1) & slots; } buf_pool->head = head; } /* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */ static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status) { if (status == INGRESS_CRC && len >= (ETHER_STD_PACKET + 1) && len <= (ETHER_STD_PACKET + 4) && skb->protocol == htons(ETH_P_8021Q)) return true; return false; } /* Errata 10GE_8 and ENET_11 - allow packet with length <=64B */ static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status) { if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) { if (ntohs(eth_hdr(skb)->h_proto) < 46) return true; } return false; } static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, struct xgene_enet_raw_desc *raw_desc, struct xgene_enet_raw_desc *exp_desc) { struct xgene_enet_desc_ring *buf_pool, *page_pool; u32 datalen, frag_size, skb_index; struct xgene_enet_pdata *pdata; struct net_device *ndev; dma_addr_t dma_addr; struct sk_buff *skb; struct device *dev; struct page *page; u16 slots, head; int i, ret = 0; __le64 *desc; u8 status; bool nv; ndev = rx_ring->ndev; pdata = netdev_priv(ndev); dev = ndev_to_dev(rx_ring->ndev); buf_pool = rx_ring->buf_pool; page_pool = rx_ring->page_pool; dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), XGENE_ENET_STD_MTU, DMA_FROM_DEVICE); skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); skb = buf_pool->rx_skb[skb_index]; buf_pool->rx_skb[skb_index] = NULL; datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1)); /* strip off CRC as HW isn't doing this */ nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0)); if (!nv) datalen -= 4; skb_put(skb, datalen); prefetch(skb->data - NET_IP_ALIGN); skb->protocol = eth_type_trans(skb, ndev); /* checking for error */ status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) | GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); if (unlikely(status)) { if (xgene_enet_errata_10GE_8(skb, datalen, status)) { pdata->false_rflr++; } else if (xgene_enet_errata_10GE_10(skb, datalen, status)) { pdata->vlan_rjbr++; } else { dev_kfree_skb_any(skb); xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc); xgene_enet_parse_error(rx_ring, status); rx_ring->rx_dropped++; goto out; } } if (!nv) goto skip_jumbo; slots = page_pool->slots - 1; head = page_pool->head; desc = (void *)exp_desc; for (i = 0; i < 4; i++) { frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1])); if (!frag_size) break; dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1])); dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); page = page_pool->frag_page[head]; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, frag_size, PAGE_SIZE); datalen += frag_size; page_pool->frag_page[head] = NULL; head = (head + 1) & slots; } page_pool->head = head; rx_ring->npagepool -= skb_shinfo(skb)->nr_frags; skip_jumbo: skb_checksum_none_assert(skb); xgene_enet_rx_csum(skb); rx_ring->rx_packets++; rx_ring->rx_bytes += datalen; napi_gro_receive(&rx_ring->napi, skb); out: if (rx_ring->npagepool <= 0) { ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL); rx_ring->npagepool = NUM_NXTBUFPOOL; if (ret) return ret; } if (--rx_ring->nbufpool == 0) { ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL); rx_ring->nbufpool = NUM_BUFPOOL; } return ret; } static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc) { return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? 
true : false; } static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, int budget) { struct net_device *ndev = ring->ndev; struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_raw_desc *raw_desc, *exp_desc; u16 head = ring->head; u16 slots = ring->slots - 1; int ret, desc_count, count = 0, processed = 0; bool is_completion; do { raw_desc = &ring->raw_desc[head]; desc_count = 0; is_completion = false; exp_desc = NULL; if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) break; /* read fpqnum field after dataaddr field */ dma_rmb(); if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) { head = (head + 1) & slots; exp_desc = &ring->raw_desc[head]; if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) { head = (head - 1) & slots; break; } dma_rmb(); count++; desc_count++; } if (is_rx_desc(raw_desc)) { ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc); } else { ret = xgene_enet_tx_completion(ring, raw_desc); is_completion = true; } xgene_enet_mark_desc_slot_empty(raw_desc); if (exp_desc) xgene_enet_mark_desc_slot_empty(exp_desc); head = (head + 1) & slots; count++; desc_count++; processed++; if (is_completion) pdata->txc_level[ring->index] += desc_count; if (ret) break; } while (--budget); if (likely(count)) { pdata->ring_ops->wr_cmd(ring, -count); ring->head = head; if (__netif_subqueue_stopped(ndev, ring->index)) netif_start_subqueue(ndev, ring->index); } return processed; } static int xgene_enet_napi(struct napi_struct *napi, const int budget) { struct xgene_enet_desc_ring *ring; int processed; ring = container_of(napi, struct xgene_enet_desc_ring, napi); processed = xgene_enet_process_ring(ring, budget); if (processed != budget) { napi_complete_done(napi, processed); enable_irq(ring->irq); } return processed; } static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct netdev_queue *txq; int i; pdata->mac_ops->reset(pdata); for (i = 0; i < pdata->txq_cnt; i++) { txq = netdev_get_tx_queue(ndev, i); txq_trans_cond_update(txq); netif_tx_start_queue(txq); } } static void xgene_enet_set_irq_name(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_desc_ring *ring; int i; for (i = 0; i < pdata->rxq_cnt; i++) { ring = pdata->rx_ring[i]; if (!pdata->cq_cnt) { snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", ndev->name); } else { snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d", ndev->name, i); } } for (i = 0; i < pdata->cq_cnt; i++) { ring = pdata->tx_ring[i]->cp_ring; snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d", ndev->name, i); } } static int xgene_enet_register_irq(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct device *dev = ndev_to_dev(ndev); struct xgene_enet_desc_ring *ring; int ret = 0, i; xgene_enet_set_irq_name(ndev); for (i = 0; i < pdata->rxq_cnt; i++) { ring = pdata->rx_ring[i]; irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, 0, ring->irq_name, ring); if (ret) { netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); } } for (i = 0; i < pdata->cq_cnt; i++) { ring = pdata->tx_ring[i]->cp_ring; irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, 0, ring->irq_name, ring); if (ret) { netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); } } return ret; } static void xgene_enet_free_irq(struct net_device *ndev) { struct xgene_enet_pdata *pdata; struct 
xgene_enet_desc_ring *ring; struct device *dev; int i; pdata = netdev_priv(ndev); dev = ndev_to_dev(ndev); for (i = 0; i < pdata->rxq_cnt; i++) { ring = pdata->rx_ring[i]; irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); devm_free_irq(dev, ring->irq, ring); } for (i = 0; i < pdata->cq_cnt; i++) { ring = pdata->tx_ring[i]->cp_ring; irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); devm_free_irq(dev, ring->irq, ring); } } static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; int i; for (i = 0; i < pdata->rxq_cnt; i++) { napi = &pdata->rx_ring[i]->napi; napi_enable(napi); } for (i = 0; i < pdata->cq_cnt; i++) { napi = &pdata->tx_ring[i]->cp_ring->napi; napi_enable(napi); } } static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; int i; for (i = 0; i < pdata->rxq_cnt; i++) { napi = &pdata->rx_ring[i]->napi; napi_disable(napi); } for (i = 0; i < pdata->cq_cnt; i++) { napi = &pdata->tx_ring[i]->cp_ring->napi; napi_disable(napi); } } static int xgene_enet_open(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); const struct xgene_mac_ops *mac_ops = pdata->mac_ops; int ret; ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt); if (ret) return ret; ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt); if (ret) return ret; xgene_enet_napi_enable(pdata); ret = xgene_enet_register_irq(ndev); if (ret) { xgene_enet_napi_disable(pdata); return ret; } if (ndev->phydev) { phy_start(ndev->phydev); } else { schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF); netif_carrier_off(ndev); } mac_ops->tx_enable(pdata); mac_ops->rx_enable(pdata); netif_tx_start_all_queues(ndev); return ret; } static int xgene_enet_close(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); const struct xgene_mac_ops *mac_ops = pdata->mac_ops; int i; netif_tx_stop_all_queues(ndev); mac_ops->tx_disable(pdata); mac_ops->rx_disable(pdata); if (ndev->phydev) phy_stop(ndev->phydev); else cancel_delayed_work_sync(&pdata->link_work); xgene_enet_free_irq(ndev); xgene_enet_napi_disable(pdata); for (i = 0; i < pdata->rxq_cnt; i++) xgene_enet_process_ring(pdata->rx_ring[i], -1); return 0; } static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) { struct xgene_enet_pdata *pdata; struct device *dev; pdata = netdev_priv(ring->ndev); dev = ndev_to_dev(ring->ndev); pdata->ring_ops->clear(ring); dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); } static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) { struct xgene_enet_desc_ring *buf_pool, *page_pool; struct xgene_enet_desc_ring *ring; int i; for (i = 0; i < pdata->txq_cnt; i++) { ring = pdata->tx_ring[i]; if (ring) { xgene_enet_delete_ring(ring); pdata->port_ops->clear(pdata, ring); if (pdata->cq_cnt) xgene_enet_delete_ring(ring->cp_ring); pdata->tx_ring[i] = NULL; } } for (i = 0; i < pdata->rxq_cnt; i++) { ring = pdata->rx_ring[i]; if (ring) { page_pool = ring->page_pool; if (page_pool) { xgene_enet_delete_pagepool(page_pool); xgene_enet_delete_ring(page_pool); pdata->port_ops->clear(pdata, page_pool); } buf_pool = ring->buf_pool; xgene_enet_delete_bufpool(buf_pool); xgene_enet_delete_ring(buf_pool); pdata->port_ops->clear(pdata, buf_pool); xgene_enet_delete_ring(ring); pdata->rx_ring[i] = NULL; } } } static int xgene_enet_get_ring_size(struct device *dev, enum xgene_enet_ring_cfgsize cfgsize) { int size = -EINVAL; switch (cfgsize) { case RING_CFGSIZE_512B: size = 0x200; break; case 
RING_CFGSIZE_2KB: size = 0x800; break; case RING_CFGSIZE_16KB: size = 0x4000; break; case RING_CFGSIZE_64KB: size = 0x10000; break; case RING_CFGSIZE_512KB: size = 0x80000; break; default: dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize); break; } return size; } static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring) { struct xgene_enet_pdata *pdata; struct device *dev; if (!ring) return; dev = ndev_to_dev(ring->ndev); pdata = netdev_priv(ring->ndev); if (ring->desc_addr) { pdata->ring_ops->clear(ring); dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); } devm_kfree(dev, ring); } static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) { struct xgene_enet_desc_ring *page_pool; struct device *dev = &pdata->pdev->dev; struct xgene_enet_desc_ring *ring; void *p; int i; for (i = 0; i < pdata->txq_cnt; i++) { ring = pdata->tx_ring[i]; if (ring) { if (ring->cp_ring && ring->cp_ring->cp_skb) devm_kfree(dev, ring->cp_ring->cp_skb); if (ring->cp_ring && pdata->cq_cnt) xgene_enet_free_desc_ring(ring->cp_ring); xgene_enet_free_desc_ring(ring); } } for (i = 0; i < pdata->rxq_cnt; i++) { ring = pdata->rx_ring[i]; if (ring) { if (ring->buf_pool) { if (ring->buf_pool->rx_skb) devm_kfree(dev, ring->buf_pool->rx_skb); xgene_enet_free_desc_ring(ring->buf_pool); } page_pool = ring->page_pool; if (page_pool) { p = page_pool->frag_page; if (p) devm_kfree(dev, p); p = page_pool->frag_dma_addr; if (p) devm_kfree(dev, p); } xgene_enet_free_desc_ring(ring); } } } static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata, struct xgene_enet_desc_ring *ring) { if ((pdata->enet_id == XGENE_ENET2) && (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) { return true; } return false; } static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata, struct xgene_enet_desc_ring *ring) { u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift; return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift); } static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( struct net_device *ndev, u32 ring_num, enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct device *dev = ndev_to_dev(ndev); struct xgene_enet_desc_ring *ring; void *irq_mbox_addr; int size; size = xgene_enet_get_ring_size(dev, cfgsize); if (size < 0) return NULL; ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring), GFP_KERNEL); if (!ring) return NULL; ring->ndev = ndev; ring->num = ring_num; ring->cfgsize = cfgsize; ring->id = ring_id; ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma, GFP_KERNEL | __GFP_ZERO); if (!ring->desc_addr) { devm_kfree(dev, ring); return NULL; } ring->size = size; if (is_irq_mbox_required(pdata, ring)) { irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE, &ring->irq_mbox_dma, GFP_KERNEL | __GFP_ZERO); if (!irq_mbox_addr) { dmam_free_coherent(dev, size, ring->desc_addr, ring->dma); devm_kfree(dev, ring); return NULL; } ring->irq_mbox_addr = irq_mbox_addr; } ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring); ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR; ring = pdata->ring_ops->setup(ring); netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n", ring->num, ring->size, ring->id, ring->slots); return ring; } static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum) { return (owner << 6) | (bufnum & GENMASK(5, 0)); } static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p) { enum xgene_ring_owner owner; if (p->enet_id == 
XGENE_ENET1) { switch (p->phy_mode) { case PHY_INTERFACE_MODE_SGMII: owner = RING_OWNER_ETH0; break; default: owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1; break; } } else { owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1; } return owner; } static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; u32 cpu_bufnum; int ret; ret = device_property_read_u32(dev, "channel", &cpu_bufnum); return (!ret) ? cpu_bufnum : pdata->cpu_bufnum; } static int xgene_enet_create_desc_rings(struct net_device *ndev) { struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_desc_ring *page_pool = NULL; struct xgene_enet_desc_ring *buf_pool = NULL; struct device *dev = ndev_to_dev(ndev); u8 eth_bufnum = pdata->eth_bufnum; u8 bp_bufnum = pdata->bp_bufnum; u16 ring_num = pdata->ring_num; enum xgene_ring_owner owner; dma_addr_t dma_exp_bufs; u16 ring_id, slots; __le64 *exp_bufs; int i, ret, size; u8 cpu_bufnum; cpu_bufnum = xgene_start_cpu_bufnum(pdata); for (i = 0; i < pdata->rxq_cnt; i++) { /* allocate rx descriptor ring */ owner = xgene_derive_ring_owner(pdata); ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, RING_CFGSIZE_16KB, ring_id); if (!rx_ring) { ret = -ENOMEM; goto err; } /* allocate buffer pool for receiving packets */ owner = xgene_derive_ring_owner(pdata); ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++); buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, RING_CFGSIZE_16KB, ring_id); if (!buf_pool) { ret = -ENOMEM; goto err; } rx_ring->nbufpool = NUM_BUFPOOL; rx_ring->npagepool = NUM_NXTBUFPOOL; rx_ring->irq = pdata->irqs[i]; buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, sizeof(struct sk_buff *), GFP_KERNEL); if (!buf_pool->rx_skb) { ret = -ENOMEM; goto err; } buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); rx_ring->buf_pool = buf_pool; pdata->rx_ring[i] = rx_ring; if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) || (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) { break; } /* allocate next buffer pool for jumbo packets */ owner = xgene_derive_ring_owner(pdata); ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++); page_pool = xgene_enet_create_desc_ring(ndev, ring_num++, RING_CFGSIZE_16KB, ring_id); if (!page_pool) { ret = -ENOMEM; goto err; } slots = page_pool->slots; page_pool->frag_page = devm_kcalloc(dev, slots, sizeof(struct page *), GFP_KERNEL); if (!page_pool->frag_page) { ret = -ENOMEM; goto err; } page_pool->frag_dma_addr = devm_kcalloc(dev, slots, sizeof(dma_addr_t), GFP_KERNEL); if (!page_pool->frag_dma_addr) { ret = -ENOMEM; goto err; } page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool); rx_ring->page_pool = page_pool; } for (i = 0; i < pdata->txq_cnt; i++) { /* allocate tx descriptor ring */ owner = xgene_derive_ring_owner(pdata); ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++); tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, RING_CFGSIZE_16KB, ring_id); if (!tx_ring) { ret = -ENOMEM; goto err; } size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs, GFP_KERNEL | __GFP_ZERO); if (!exp_bufs) { ret = -ENOMEM; goto err; } tx_ring->exp_bufs = exp_bufs; pdata->tx_ring[i] = tx_ring; if (!pdata->cq_cnt) { cp_ring = pdata->rx_ring[i]; } else { /* allocate tx completion descriptor ring */ ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); 
cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++, RING_CFGSIZE_16KB, ring_id); if (!cp_ring) { ret = -ENOMEM; goto err; } cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i]; cp_ring->index = i; } cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, sizeof(struct sk_buff *), GFP_KERNEL); if (!cp_ring->cp_skb) { ret = -ENOMEM; goto err; } size = sizeof(dma_addr_t) * MAX_SKB_FRAGS; cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots, size, GFP_KERNEL); if (!cp_ring->frag_dma_addr) { devm_kfree(dev, cp_ring->cp_skb); ret = -ENOMEM; goto err; } tx_ring->cp_ring = cp_ring; tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); } if (pdata->ring_ops->coalesce) pdata->ring_ops->coalesce(pdata->tx_ring[0]); pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; return 0; err: xgene_enet_free_desc_rings(pdata); return ret; } static void xgene_enet_get_stats64( struct net_device *ndev, struct rtnl_link_stats64 *stats) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_desc_ring *ring; int i; for (i = 0; i < pdata->txq_cnt; i++) { ring = pdata->tx_ring[i]; if (ring) { stats->tx_packets += ring->tx_packets; stats->tx_bytes += ring->tx_bytes; stats->tx_dropped += ring->tx_dropped; stats->tx_errors += ring->tx_errors; } } for (i = 0; i < pdata->rxq_cnt; i++) { ring = pdata->rx_ring[i]; if (ring) { stats->rx_packets += ring->rx_packets; stats->rx_bytes += ring->rx_bytes; stats->rx_dropped += ring->rx_dropped; stats->rx_errors += ring->rx_errors + ring->rx_length_errors + ring->rx_crc_errors + ring->rx_frame_errors + ring->rx_fifo_errors; stats->rx_length_errors += ring->rx_length_errors; stats->rx_crc_errors += ring->rx_crc_errors; stats->rx_frame_errors += ring->rx_frame_errors; stats->rx_fifo_errors += ring->rx_fifo_errors; } } } static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); int ret; ret = eth_mac_addr(ndev, addr); if (ret) return ret; pdata->mac_ops->set_mac_addr(pdata); return ret; } static int xgene_change_mtu(struct net_device *ndev, int new_mtu) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); int frame_size; if (!netif_running(ndev)) return 0; frame_size = (new_mtu > ETH_DATA_LEN) ? 
(new_mtu + 18) : 0x600; xgene_enet_close(ndev); ndev->mtu = new_mtu; pdata->mac_ops->set_framesize(pdata, frame_size); xgene_enet_open(ndev); return 0; } static const struct net_device_ops xgene_ndev_ops = { .ndo_open = xgene_enet_open, .ndo_stop = xgene_enet_close, .ndo_start_xmit = xgene_enet_start_xmit, .ndo_tx_timeout = xgene_enet_timeout, .ndo_get_stats64 = xgene_enet_get_stats64, .ndo_change_mtu = xgene_change_mtu, .ndo_set_mac_address = xgene_enet_set_mac_address, }; #ifdef CONFIG_ACPI static void xgene_get_port_id_acpi(struct device *dev, struct xgene_enet_pdata *pdata) { acpi_status status; u64 temp; status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp); if (ACPI_FAILURE(status)) { pdata->port_id = 0; } else { pdata->port_id = temp; } return; } #endif static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata) { u32 id = 0; of_property_read_u32(dev->of_node, "port-id", &id); pdata->port_id = id & BIT(0); return; } static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; int delay, ret; ret = device_property_read_u32(dev, "tx-delay", &delay); if (ret) { pdata->tx_delay = 4; return 0; } if (delay < 0 || delay > 7) { dev_err(dev, "Invalid tx-delay specified\n"); return -EINVAL; } pdata->tx_delay = delay; return 0; } static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; int delay, ret; ret = device_property_read_u32(dev, "rx-delay", &delay); if (ret) { pdata->rx_delay = 2; return 0; } if (delay < 0 || delay > 7) { dev_err(dev, "Invalid rx-delay specified\n"); return -EINVAL; } pdata->rx_delay = delay; return 0; } static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) { struct platform_device *pdev = pdata->pdev; int i, ret, max_irqs; if (phy_interface_mode_is_rgmii(pdata->phy_mode)) max_irqs = 1; else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) max_irqs = 2; else max_irqs = XGENE_MAX_ENET_IRQ; for (i = 0; i < max_irqs; i++) { ret = platform_get_irq(pdev, i); if (ret < 0) { if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { max_irqs = i; pdata->rxq_cnt = max_irqs / 2; pdata->txq_cnt = max_irqs / 2; pdata->cq_cnt = max_irqs / 2; break; } return ret; } pdata->irqs[i] = ret; } return 0; } static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) { int ret; if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) return; if (!IS_ENABLED(CONFIG_MDIO_XGENE)) return; ret = xgene_enet_phy_connect(pdata->ndev); if (!ret) pdata->mdio_driver = true; } static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; pdata->sfp_gpio_en = false; if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII || (!device_property_present(dev, "sfp-gpios") && !device_property_present(dev, "rxlos-gpios"))) return; pdata->sfp_gpio_en = true; pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN); if (IS_ERR(pdata->sfp_rdy)) pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN); } static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { struct platform_device *pdev; struct net_device *ndev; struct device *dev; struct resource *res; void __iomem *base_addr; u32 offset; int ret = 0; pdev = pdata->pdev; dev = &pdev->dev; ndev = pdata->ndev; res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR); if (!res) { dev_err(dev, "Resource enet_csr not defined\n"); return -ENODEV; } pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res)); if (!pdata->base_addr) { dev_err(dev, "Unable to retrieve ENET Port CSR 
region\n"); return -ENOMEM; } res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR); if (!res) { dev_err(dev, "Resource ring_csr not defined\n"); return -ENODEV; } pdata->ring_csr_addr = devm_ioremap(dev, res->start, resource_size(res)); if (!pdata->ring_csr_addr) { dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); return -ENOMEM; } res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD); if (!res) { dev_err(dev, "Resource ring_cmd not defined\n"); return -ENODEV; } pdata->ring_cmd_addr = devm_ioremap(dev, res->start, resource_size(res)); if (!pdata->ring_cmd_addr) { dev_err(dev, "Unable to retrieve ENET Ring command region\n"); return -ENOMEM; } if (dev->of_node) xgene_get_port_id_dt(dev, pdata); #ifdef CONFIG_ACPI else xgene_get_port_id_acpi(dev, pdata); #endif if (device_get_ethdev_address(dev, ndev)) eth_hw_addr_random(ndev); memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); pdata->phy_mode = device_get_phy_mode(dev); if (pdata->phy_mode < 0) { dev_err(dev, "Unable to get phy-connection-type\n"); return pdata->phy_mode; } if (!phy_interface_mode_is_rgmii(pdata->phy_mode) && pdata->phy_mode != PHY_INTERFACE_MODE_SGMII && pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) { dev_err(dev, "Incorrect phy-connection-type specified\n"); return -ENODEV; } ret = xgene_get_tx_delay(pdata); if (ret) return ret; ret = xgene_get_rx_delay(pdata); if (ret) return ret; ret = xgene_enet_get_irqs(pdata); if (ret) return ret; xgene_enet_gpiod_get(pdata); pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { /* Abort if the clock is defined but couldn't be * retrived. Always abort if the clock is missing on * DT system as the driver can't cope with this case. */ if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node) return PTR_ERR(pdata->clk); /* Firmware may have set up the clock already. */ dev_info(dev, "clocks have been setup already\n"); } } if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET); else base_addr = pdata->base_addr; pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET; pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; if (phy_interface_mode_is_rgmii(pdata->phy_mode) || pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET; pdata->mcx_stats_addr = pdata->base_addr + BLOCK_ETH_STATS_OFFSET; offset = (pdata->enet_id == XGENE_ENET1) ? 
BLOCK_ETH_MAC_CSR_OFFSET : X2_BLOCK_ETH_MAC_CSR_OFFSET; pdata->mcx_mac_csr_addr = base_addr + offset; } else { pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET; pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET; } pdata->rx_buff_cnt = NUM_PKT_BUF; return 0; } static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) { struct xgene_enet_cle *enet_cle = &pdata->cle; struct xgene_enet_desc_ring *page_pool; struct net_device *ndev = pdata->ndev; struct xgene_enet_desc_ring *buf_pool; u16 dst_ring_num, ring_id; int i, ret; u32 count; ret = pdata->port_ops->reset(pdata); if (ret) return ret; ret = xgene_enet_create_desc_rings(ndev); if (ret) { netdev_err(ndev, "Error in ring configuration\n"); return ret; } /* setup buffer pool */ for (i = 0; i < pdata->rxq_cnt; i++) { buf_pool = pdata->rx_ring[i]->buf_pool; xgene_enet_init_bufpool(buf_pool); page_pool = pdata->rx_ring[i]->page_pool; xgene_enet_init_bufpool(page_pool); count = pdata->rx_buff_cnt; ret = xgene_enet_refill_bufpool(buf_pool, count); if (ret) goto err; ret = xgene_enet_refill_pagepool(page_pool, count); if (ret) goto err; } dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]); buf_pool = pdata->rx_ring[0]->buf_pool; if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { /* Initialize and Enable PreClassifier Tree */ enet_cle->max_nodes = 512; enet_cle->max_dbptrs = 1024; enet_cle->parsers = 3; enet_cle->active_parser = PARSER_ALL; enet_cle->ptree.start_node = 0; enet_cle->ptree.start_dbptr = 0; enet_cle->jump_bytes = 8; ret = pdata->cle_ops->cle_init(pdata); if (ret) { netdev_err(ndev, "Preclass Tree init error\n"); goto err; } } else { dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]); buf_pool = pdata->rx_ring[0]->buf_pool; page_pool = pdata->rx_ring[0]->page_pool; ring_id = (page_pool) ? 
page_pool->id : 0; pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id, ring_id); } ndev->max_mtu = XGENE_ENET_MAX_MTU; pdata->phy_speed = SPEED_UNKNOWN; pdata->mac_ops->init(pdata); return ret; err: xgene_enet_delete_desc_rings(pdata); return ret; } static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) { switch (pdata->phy_mode) { case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: pdata->mac_ops = &xgene_gmac_ops; pdata->port_ops = &xgene_gport_ops; pdata->rm = RM3; pdata->rxq_cnt = 1; pdata->txq_cnt = 1; pdata->cq_cnt = 0; break; case PHY_INTERFACE_MODE_SGMII: pdata->mac_ops = &xgene_sgmac_ops; pdata->port_ops = &xgene_sgport_ops; pdata->rm = RM1; pdata->rxq_cnt = 1; pdata->txq_cnt = 1; pdata->cq_cnt = 1; break; default: pdata->mac_ops = &xgene_xgmac_ops; pdata->port_ops = &xgene_xgport_ops; pdata->cle_ops = &xgene_cle3in_ops; pdata->rm = RM0; if (!pdata->rxq_cnt) { pdata->rxq_cnt = XGENE_NUM_RX_RING; pdata->txq_cnt = XGENE_NUM_TX_RING; pdata->cq_cnt = XGENE_NUM_TXC_RING; } break; } if (pdata->enet_id == XGENE_ENET1) { switch (pdata->port_id) { case 0: if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0; pdata->eth_bufnum = X2_START_ETH_BUFNUM_0; pdata->bp_bufnum = X2_START_BP_BUFNUM_0; pdata->ring_num = START_RING_NUM_0; } else { pdata->cpu_bufnum = START_CPU_BUFNUM_0; pdata->eth_bufnum = START_ETH_BUFNUM_0; pdata->bp_bufnum = START_BP_BUFNUM_0; pdata->ring_num = START_RING_NUM_0; } break; case 1: if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1; pdata->eth_bufnum = XG_START_ETH_BUFNUM_1; pdata->bp_bufnum = XG_START_BP_BUFNUM_1; pdata->ring_num = XG_START_RING_NUM_1; } else { pdata->cpu_bufnum = START_CPU_BUFNUM_1; pdata->eth_bufnum = START_ETH_BUFNUM_1; pdata->bp_bufnum = START_BP_BUFNUM_1; pdata->ring_num = START_RING_NUM_1; } break; default: break; } pdata->ring_ops = &xgene_ring1_ops; } else { switch (pdata->port_id) { case 0: pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0; pdata->eth_bufnum = X2_START_ETH_BUFNUM_0; pdata->bp_bufnum = X2_START_BP_BUFNUM_0; pdata->ring_num = X2_START_RING_NUM_0; break; case 1: pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1; pdata->eth_bufnum = X2_START_ETH_BUFNUM_1; pdata->bp_bufnum = X2_START_BP_BUFNUM_1; pdata->ring_num = X2_START_RING_NUM_1; break; default: break; } pdata->rm = RM0; pdata->ring_ops = &xgene_ring2_ops; } } static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; int i; for (i = 0; i < pdata->rxq_cnt; i++) { napi = &pdata->rx_ring[i]->napi; netif_napi_add(pdata->ndev, napi, xgene_enet_napi); } for (i = 0; i < pdata->cq_cnt; i++) { napi = &pdata->tx_ring[i]->cp_ring->napi; netif_napi_add(pdata->ndev, napi, xgene_enet_napi); } } #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_enet_acpi_match[] = { { "APMC0D05", XGENE_ENET1}, { "APMC0D30", XGENE_ENET1}, { "APMC0D31", XGENE_ENET1}, { "APMC0D3F", XGENE_ENET1}, { "APMC0D26", XGENE_ENET2}, { "APMC0D25", XGENE_ENET2}, { } }; MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); #endif static const struct of_device_id xgene_enet_of_match[] = { {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1}, {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1}, {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1}, {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2}, {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2}, {}, }; 
MODULE_DEVICE_TABLE(of, xgene_enet_of_match); static int xgene_enet_probe(struct platform_device *pdev) { struct net_device *ndev; struct xgene_enet_pdata *pdata; struct device *dev = &pdev->dev; void (*link_state)(struct work_struct *); const struct of_device_id *of_id; int ret; ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata), XGENE_NUM_TX_RING, XGENE_NUM_RX_RING); if (!ndev) return -ENOMEM; pdata = netdev_priv(ndev); pdata->pdev = pdev; pdata->ndev = ndev; SET_NETDEV_DEV(ndev, dev); platform_set_drvdata(pdev, pdata); ndev->netdev_ops = &xgene_ndev_ops; xgene_enet_set_ethtool_ops(ndev); ndev->features |= NETIF_F_IP_CSUM | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_SG; of_id = of_match_device(xgene_enet_of_match, &pdev->dev); if (of_id) { pdata->enet_id = (uintptr_t)of_id->data; } #ifdef CONFIG_ACPI else { const struct acpi_device_id *acpi_id; acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev); if (acpi_id) pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data; } #endif if (!pdata->enet_id) { ret = -ENODEV; goto err; } ret = xgene_enet_get_resources(pdata); if (ret) goto err; xgene_enet_setup_ops(pdata); spin_lock_init(&pdata->mac_lock); if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM; spin_lock_init(&pdata->mss_lock); } ndev->hw_features = ndev->features; ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret) { netdev_err(ndev, "No usable DMA configuration\n"); goto err; } xgene_enet_check_phy_handle(pdata); ret = xgene_enet_init_hw(pdata); if (ret) goto err2; link_state = pdata->mac_ops->link_state; if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { INIT_DELAYED_WORK(&pdata->link_work, link_state); } else if (!pdata->mdio_driver) { if (phy_interface_mode_is_rgmii(pdata->phy_mode)) ret = xgene_enet_mdio_config(pdata); else INIT_DELAYED_WORK(&pdata->link_work, link_state); if (ret) goto err1; } spin_lock_init(&pdata->stats_lock); ret = xgene_extd_stats_init(pdata); if (ret) goto err1; xgene_enet_napi_add(pdata); ret = register_netdev(ndev); if (ret) { netdev_err(ndev, "Failed to register netdev\n"); goto err1; } return 0; err1: /* * If necessary, free_netdev() will call netif_napi_del() and undo * the effects of xgene_enet_napi_add()'s calls to netif_napi_add(). 
*/ xgene_enet_delete_desc_rings(pdata); err2: if (pdata->mdio_driver) xgene_enet_phy_disconnect(pdata); else if (phy_interface_mode_is_rgmii(pdata->phy_mode)) xgene_enet_mdio_remove(pdata); err: free_netdev(ndev); return ret; } static int xgene_enet_remove(struct platform_device *pdev) { struct xgene_enet_pdata *pdata; struct net_device *ndev; pdata = platform_get_drvdata(pdev); ndev = pdata->ndev; rtnl_lock(); if (netif_running(ndev)) dev_close(ndev); rtnl_unlock(); if (pdata->mdio_driver) xgene_enet_phy_disconnect(pdata); else if (phy_interface_mode_is_rgmii(pdata->phy_mode)) xgene_enet_mdio_remove(pdata); unregister_netdev(ndev); xgene_enet_delete_desc_rings(pdata); pdata->port_ops->shutdown(pdata); free_netdev(ndev); return 0; } static void xgene_enet_shutdown(struct platform_device *pdev) { struct xgene_enet_pdata *pdata; pdata = platform_get_drvdata(pdev); if (!pdata) return; if (!pdata->ndev) return; xgene_enet_remove(pdev); } static struct platform_driver xgene_enet_driver = { .driver = { .name = "xgene-enet", .of_match_table = of_match_ptr(xgene_enet_of_match), .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match), }, .probe = xgene_enet_probe, .remove = xgene_enet_remove, .shutdown = xgene_enet_shutdown, }; module_platform_driver(xgene_enet_driver); MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver"); MODULE_AUTHOR("Iyappan Subramanian <[email protected]>"); MODULE_AUTHOR("Keyur Chudgar <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
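/*
 * Editorial sketch (not part of the kernel sources above): the receive path
 * in xgene_enet_main.c decodes the hardware BUFDATALEN field with
 * xgene_enet_get_data_len(), where a zero low part means "the buffer is
 * completely full" and 0x7800 marks an empty descriptor.  The standalone
 * program below mirrors that decode so it can be compiled and exercised in
 * user space.  The XG_SIZE_2K/4K/16K values are assumptions based on the
 * driver code visible above, not values taken from the X-Gene headers.
 */
#include <stdint.h>
#include <stdio.h>

#define XG_SIZE_2K   2048u   /* assumed value of SIZE_2K  */
#define XG_SIZE_4K   4096u   /* assumed value of SIZE_4K  */
#define XG_SIZE_16K 16384u   /* assumed value of SIZE_16K */

/* Decode a raw BUFDATALEN value into a byte count, as the driver does. */
static uint16_t xg_decode_data_len(uint16_t hw_len)
{
	uint16_t mask;

	if (hw_len == 0x7800)			/* marker for "no data" */
		return 0;

	if (!(hw_len & (1u << 14))) {		/* 16K buffer encoding */
		mask = (1u << 14) - 1;
		return (hw_len & mask) ? (hw_len & mask) : XG_SIZE_16K;
	}

	if (!(hw_len & (3u << 12))) {		/* 4K buffer encoding */
		mask = (1u << 12) - 1;
		return (hw_len & mask) ? (hw_len & mask) : XG_SIZE_4K;
	}

	mask = (1u << 12) - 1;			/* default: 2K buffer encoding */
	return (hw_len & mask) ? (hw_len & mask) : XG_SIZE_2K;
}

int main(void)
{
	/* Partially filled page buffer: BIT(14) set, 1500 bytes used. */
	printf("%u\n", (unsigned int)xg_decode_data_len((1u << 14) | 1500));
	/* Completely filled 4K page buffer: BIT(14) set, low bits zero. */
	printf("%u\n", (unsigned int)xg_decode_data_len(1u << 14));
	/* The "empty" marker. */
	printf("%u\n", (unsigned int)xg_decode_data_len(0x7800));
	return 0;
}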
// SPDX-License-Identifier: GPL-2.0-or-later /* Applied Micro X-Gene SoC Ethernet Driver * * Copyright (c) 2015, Applied Micro Circuits Corporation * Author: Iyappan Subramanian <[email protected]> */ #include "xgene_enet_main.h" #include "xgene_enet_hw.h" #include "xgene_enet_ring2.h" static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) { u32 *ring_cfg = ring->state; u64 addr = ring->dma; if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) { ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); ring_cfg[3] |= SET_BIT(X2_DEQINTEN); } ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2); addr >>= 8; ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); addr >>= 27; ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize) | ACCEPTLERR | SET_VAL(RINGADDRH, addr); ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1); ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM); } static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) { u32 *ring_cfg = ring->state; bool is_bufpool; u32 val; is_bufpool = xgene_enet_is_bufpool(ring->id); val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR; ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val); if (is_bufpool) ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE); } static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) { u32 *ring_cfg = ring->state; ring_cfg[3] |= RECOMBBUF; ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7); } static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, u32 offset, u32 data) { struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); iowrite32(data, pdata->ring_csr_addr + offset); } static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring) { struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); int i; xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num); for (i = 0; i < pdata->ring_ops->num_ring_config; i++) { xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4), ring->state[i]); } } static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring) { memset(ring->state, 0, sizeof(ring->state)); xgene_enet_write_ring_state(ring); } static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring) { enum xgene_ring_owner owner; xgene_enet_ring_set_type(ring); owner = xgene_enet_ring_owner(ring->id); if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1) xgene_enet_ring_set_recombbuf(ring); xgene_enet_ring_init(ring); xgene_enet_write_ring_state(ring); } static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring) { u32 ring_id_val, ring_id_buf; bool is_bufpool; if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) return; is_bufpool = xgene_enet_is_bufpool(ring->id); ring_id_val = ring->id & GENMASK(9, 0); ring_id_val |= OVERWRITE; ring_id_buf = (ring->num << 9) & GENMASK(18, 9); ring_id_buf |= PREFETCH_BUF_EN; if (is_bufpool) ring_id_buf |= IS_BUFFER_POOL; xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val); xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf); } static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring) { u32 ring_id; ring_id = ring->id | OVERWRITE; xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id); xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0); } static struct xgene_enet_desc_ring *xgene_enet_setup_ring( struct xgene_enet_desc_ring *ring) { bool is_bufpool; u32 addr, i; xgene_enet_clr_ring_state(ring); xgene_enet_set_ring_state(ring); xgene_enet_set_ring_id(ring); ring->slots = xgene_enet_get_numslots(ring->id, ring->size); is_bufpool = xgene_enet_is_bufpool(ring->id); if (is_bufpool || xgene_enet_ring_owner(ring->id) != 
RING_OWNER_CPU) return ring; addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK)); xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10); for (i = 0; i < ring->slots; i++) xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]); return ring; } static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring) { xgene_enet_clr_desc_ring_id(ring); xgene_enet_clr_ring_state(ring); } static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count) { u32 data = 0; if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) { data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) | INTR_CLEAR; } data |= (count & GENMASK(16, 0)); iowrite32(data, ring->cmd); } static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) { u32 __iomem *cmd_base = ring->cmd_base; u32 ring_state, num_msgs; ring_state = ioread32(&cmd_base[1]); num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state); return num_msgs; } static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) { u32 data = 0x77777777; xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data); xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data); xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data); xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08); xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10); } struct xgene_ring_ops xgene_ring2_ops = { .num_ring_config = X2_NUM_RING_CONFIG, .num_ring_id_shift = 13, .setup = xgene_enet_setup_ring, .clear = xgene_enet_clear_ring, .wr_cmd = xgene_enet_wr_cmd, .len = xgene_enet_ring_len, .coalesce = xgene_enet_setup_coalescing, };
linux-master
drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
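/*
 * Editorial sketch (not part of the kernel sources above): in
 * xgene_enet_ring_init() of xgene_enet_ring2.c the ring DMA base is
 * programmed as two fields, RINGADDRL and RINGADDRH.  The shifts visible in
 * the driver (">>= 8", then ">>= 27") imply a 256-byte-aligned base whose
 * next 27 bits land in the low field and whose remaining bits land in the
 * high field.  The helper below reproduces only that split; the bit
 * positions of the fields inside ring_cfg[2]/ring_cfg[3] come from SET_VAL()
 * macros not shown here, so they are intentionally left out.
 */
#include <stdint.h>
#include <stdio.h>

struct ring_addr_fields {
	uint32_t addrl;		/* DMA address bits [34:8] (27 bits) */
	uint32_t addrh;		/* DMA address bits above bit 34 */
};

static struct ring_addr_fields xg_split_ring_addr(uint64_t dma)
{
	struct ring_addr_fields f;
	uint64_t a = dma >> 8;		/* ring base is 256-byte aligned */

	f.addrl = (uint32_t)(a & ((1ULL << 27) - 1));
	f.addrh = (uint32_t)(a >> 27);
	return f;
}

int main(void)
{
	/* Hypothetical 256-byte-aligned descriptor ring base address. */
	struct ring_addr_fields f = xg_split_ring_addr(0x0000009f12345600ULL);

	printf("RINGADDRL=0x%07x RINGADDRH=0x%x\n", f.addrl, f.addrh);
	return 0;
}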
// SPDX-License-Identifier: GPL-2.0-or-later /* Applied Micro X-Gene SoC Ethernet Driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Authors: Iyappan Subramanian <[email protected]> * Ravi Patel <[email protected]> * Keyur Chudgar <[email protected]> */ #include "xgene_enet_main.h" #include "xgene_enet_hw.h" static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) { u32 *ring_cfg = ring->state; u64 addr = ring->dma; enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize; ring_cfg[4] |= (1 << SELTHRSH_POS) & CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN); ring_cfg[3] |= ACCEPTLERR; ring_cfg[2] |= QCOHERENT; addr >>= 8; ring_cfg[2] |= (addr << RINGADDRL_POS) & CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN); addr >>= RINGADDRL_LEN; ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN); ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) & CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN); } static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) { u32 *ring_cfg = ring->state; bool is_bufpool; u32 val; is_bufpool = xgene_enet_is_bufpool(ring->id); val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR; ring_cfg[4] |= (val << RINGTYPE_POS) & CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN); if (is_bufpool) { ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) & CREATE_MASK(RINGMODE_POS, RINGMODE_LEN); } } static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) { u32 *ring_cfg = ring->state; ring_cfg[3] |= RECOMBBUF; ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) & CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN); ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN); } static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, u32 offset, u32 data) { struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); iowrite32(data, pdata->ring_csr_addr + offset); } static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring, u32 offset, u32 *data) { struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); *data = ioread32(pdata->ring_csr_addr + offset); } static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring) { struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); int i; xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num); for (i = 0; i < pdata->ring_ops->num_ring_config; i++) { xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4), ring->state[i]); } } static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring) { memset(ring->state, 0, sizeof(ring->state)); xgene_enet_write_ring_state(ring); } static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring) { xgene_enet_ring_set_type(ring); if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 || xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1) xgene_enet_ring_set_recombbuf(ring); xgene_enet_ring_init(ring); xgene_enet_write_ring_state(ring); } static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring) { u32 ring_id_val, ring_id_buf; bool is_bufpool; is_bufpool = xgene_enet_is_bufpool(ring->id); ring_id_val = ring->id & GENMASK(9, 0); ring_id_val |= OVERWRITE; ring_id_buf = (ring->num << 9) & GENMASK(18, 9); ring_id_buf |= PREFETCH_BUF_EN; if (is_bufpool) ring_id_buf |= IS_BUFFER_POOL; xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val); xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf); } static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring) { u32 ring_id; ring_id = ring->id | OVERWRITE; xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id); xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0); } static struct 
xgene_enet_desc_ring *xgene_enet_setup_ring( struct xgene_enet_desc_ring *ring) { u32 size = ring->size; u32 i, data; bool is_bufpool; xgene_enet_clr_ring_state(ring); xgene_enet_set_ring_state(ring); xgene_enet_set_ring_id(ring); ring->slots = xgene_enet_get_numslots(ring->id, size); is_bufpool = xgene_enet_is_bufpool(ring->id); if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) return ring; for (i = 0; i < ring->slots; i++) xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]); xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); data |= BIT(31 - xgene_enet_ring_bufnum(ring->id)); xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); return ring; } static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring) { u32 data; bool is_bufpool; is_bufpool = xgene_enet_is_bufpool(ring->id); if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) goto out; xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id)); xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); out: xgene_enet_clr_desc_ring_id(ring); xgene_enet_clr_ring_state(ring); } static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count) { iowrite32(count, ring->cmd); } static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) { u32 __iomem *cmd_base = ring->cmd_base; u32 ring_state, num_msgs; ring_state = ioread32(&cmd_base[1]); num_msgs = GET_VAL(NUMMSGSINQ, ring_state); return num_msgs; } void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, enum xgene_enet_err_code status) { switch (status) { case INGRESS_CRC: ring->rx_crc_errors++; break; case INGRESS_CHECKSUM: case INGRESS_CHECKSUM_COMPUTE: ring->rx_errors++; break; case INGRESS_TRUNC_FRAME: ring->rx_frame_errors++; break; case INGRESS_PKT_LEN: ring->rx_length_errors++; break; case INGRESS_PKT_UNDER: ring->rx_frame_errors++; break; case INGRESS_FIFO_OVERRUN: ring->rx_fifo_errors++; break; default: break; } } static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 val) { void __iomem *addr = pdata->eth_csr_addr + offset; iowrite32(val, addr); } static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata, u32 offset, u32 val) { void __iomem *addr = pdata->eth_ring_if_addr + offset; iowrite32(val, addr); } static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 val) { void __iomem *addr = pdata->eth_diag_csr_addr + offset; iowrite32(val, addr); } static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 val) { void __iomem *addr = pdata->mcx_mac_csr_addr + offset; iowrite32(val, addr); } void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data) { void __iomem *addr, *wr, *cmd, *cmd_done; struct net_device *ndev = pdata->ndev; u8 wait = 10; u32 done; if (pdata->mdio_driver && ndev->phydev && phy_interface_mode_is_rgmii(pdata->phy_mode)) { struct mii_bus *bus = ndev->phydev->mdio.bus; return xgene_mdio_wr_mac(bus->priv, wr_addr, wr_data); } addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; spin_lock(&pdata->mac_lock); iowrite32(wr_addr, addr); iowrite32(wr_data, wr); iowrite32(XGENE_ENET_WR_CMD, cmd); while (!(done = ioread32(cmd_done)) && wait--) udelay(1); if (!done) netdev_err(ndev, "mac write failed, addr: %04x data: %08x\n", wr_addr, wr_data); iowrite32(0, cmd); spin_unlock(&pdata->mac_lock); } 
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 *val) { void __iomem *addr = pdata->eth_csr_addr + offset; *val = ioread32(addr); } static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 *val) { void __iomem *addr = pdata->eth_diag_csr_addr + offset; *val = ioread32(addr); } static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 *val) { void __iomem *addr = pdata->mcx_mac_csr_addr + offset; *val = ioread32(addr); } u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr) { void __iomem *addr, *rd, *cmd, *cmd_done; struct net_device *ndev = pdata->ndev; u32 done, rd_data; u8 wait = 10; if (pdata->mdio_driver && ndev->phydev && phy_interface_mode_is_rgmii(pdata->phy_mode)) { struct mii_bus *bus = ndev->phydev->mdio.bus; return xgene_mdio_rd_mac(bus->priv, rd_addr); } addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET; cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; spin_lock(&pdata->mac_lock); iowrite32(rd_addr, addr); iowrite32(XGENE_ENET_RD_CMD, cmd); while (!(done = ioread32(cmd_done)) && wait--) udelay(1); if (!done) netdev_err(ndev, "mac read failed, addr: %04x\n", rd_addr); rd_data = ioread32(rd); iowrite32(0, cmd); spin_unlock(&pdata->mac_lock); return rd_data; } u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr) { void __iomem *addr, *rd, *cmd, *cmd_done; u32 done, rd_data; u8 wait = 10; addr = pdata->mcx_stats_addr + STAT_ADDR_REG_OFFSET; rd = pdata->mcx_stats_addr + STAT_READ_REG_OFFSET; cmd = pdata->mcx_stats_addr + STAT_COMMAND_REG_OFFSET; cmd_done = pdata->mcx_stats_addr + STAT_COMMAND_DONE_REG_OFFSET; spin_lock(&pdata->stats_lock); iowrite32(rd_addr, addr); iowrite32(XGENE_ENET_RD_CMD, cmd); while (!(done = ioread32(cmd_done)) && wait--) udelay(1); if (!done) netdev_err(pdata->ndev, "mac stats read failed, addr: %04x\n", rd_addr); rd_data = ioread32(rd); iowrite32(0, cmd); spin_unlock(&pdata->stats_lock); return rd_data; } static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) { const u8 *dev_addr = pdata->ndev->dev_addr; u32 addr0, addr1; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); xgene_enet_wr_mac(pdata, STATION_ADDR0_ADDR, addr0); xgene_enet_wr_mac(pdata, STATION_ADDR1_ADDR, addr1); } static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) { struct net_device *ndev = pdata->ndev; u32 data; u8 wait = 10; xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); do { usleep_range(100, 110); xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data); } while ((data != 0xffffffff) && wait--); if (data != 0xffffffff) { netdev_err(ndev, "Failed to release memory from shutdown\n"); return -ENODEV; } return 0; } static void xgene_gmac_reset(struct xgene_enet_pdata *pdata) { xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1); xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0); } static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; if (dev->of_node) { struct clk *parent = clk_get_parent(pdata->clk); switch (pdata->phy_speed) { case SPEED_10: clk_set_rate(parent, 2500000); break; case SPEED_100: clk_set_rate(parent, 25000000); break; default: clk_set_rate(parent, 125000000); break; } } #ifdef CONFIG_ACPI else { switch (pdata->phy_speed) { case SPEED_10: 
acpi_evaluate_object(ACPI_HANDLE(dev), "S10", NULL, NULL); break; case SPEED_100: acpi_evaluate_object(ACPI_HANDLE(dev), "S100", NULL, NULL); break; default: acpi_evaluate_object(ACPI_HANDLE(dev), "S1G", NULL, NULL); break; } } #endif } static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata) { u32 icm0, icm2, mc2; u32 intf_ctl, rgmii, value; xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0); xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2); mc2 = xgene_enet_rd_mac(pdata, MAC_CONFIG_2_ADDR); intf_ctl = xgene_enet_rd_mac(pdata, INTERFACE_CONTROL_ADDR); xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii); switch (pdata->phy_speed) { case SPEED_10: ENET_INTERFACE_MODE2_SET(&mc2, 1); intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE); CFG_MACMODE_SET(&icm0, 0); CFG_WAITASYNCRD_SET(&icm2, 500); rgmii &= ~CFG_SPEED_1250; break; case SPEED_100: ENET_INTERFACE_MODE2_SET(&mc2, 1); intf_ctl &= ~ENET_GHD_MODE; intf_ctl |= ENET_LHD_MODE; CFG_MACMODE_SET(&icm0, 1); CFG_WAITASYNCRD_SET(&icm2, 80); rgmii &= ~CFG_SPEED_1250; break; default: ENET_INTERFACE_MODE2_SET(&mc2, 2); intf_ctl &= ~ENET_LHD_MODE; intf_ctl |= ENET_GHD_MODE; CFG_MACMODE_SET(&icm0, 2); CFG_WAITASYNCRD_SET(&icm2, 0); CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay); CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay); rgmii |= CFG_SPEED_1250; xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value); value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value); break; } mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK; xgene_enet_wr_mac(pdata, MAC_CONFIG_2_ADDR, mc2); xgene_enet_wr_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); xgene_enet_configure_clock(pdata); xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0); xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2); } static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size) { xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size); } static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata, bool enable) { u32 data; xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data); if (enable) data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN; else data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN); xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data); } static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable) { u32 data; data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR); if (enable) data |= TX_FLOW_EN; else data &= ~TX_FLOW_EN; xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data); pdata->mac_ops->enable_tx_pause(pdata, enable); } static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable) { u32 data; data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR); if (enable) data |= RX_FLOW_EN; else data &= ~RX_FLOW_EN; xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data); } static void xgene_gmac_init(struct xgene_enet_pdata *pdata) { u32 value; if (!pdata->mdio_driver) xgene_gmac_reset(pdata); xgene_gmac_set_speed(pdata); xgene_gmac_set_mac_addr(pdata); /* Adjust MDC clock frequency */ value = xgene_enet_rd_mac(pdata, MII_MGMT_CONFIG_ADDR); MGMT_CLOCK_SEL_SET(&value, 7); xgene_enet_wr_mac(pdata, MII_MGMT_CONFIG_ADDR, value); /* Enable drop if bufpool not available */ xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value); value |= CFG_RSIF_FPBUFF_TIMEOUT_EN; xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value); /* Rtype should be copied from FP */ xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0); /* Configure HW pause frame generation */ 
xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value); value = (DEF_QUANTA << 16) | (value & 0xFFFF); xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value); xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES); xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES); xgene_gmac_flowctl_tx(pdata, pdata->tx_pause); xgene_gmac_flowctl_rx(pdata, pdata->rx_pause); /* Rx-Tx traffic resume */ xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value); value &= ~TX_DV_GATE_EN0; value &= ~RX_DV_GATE_EN0; value |= RESUME_RX0; xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value); xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX); } static void xgene_gmac_get_drop_cnt(struct xgene_enet_pdata *pdata, u32 *rx, u32 *tx) { u32 count; xgene_enet_rd_mcx_csr(pdata, ICM_ECM_DROP_COUNT_REG0_ADDR, &count); *rx = ICM_DROP_COUNT(count); *tx = ECM_DROP_COUNT(count); /* Errata: 10GE_4 - Fix ICM_ECM_DROP_COUNT not clear-on-read */ xgene_enet_rd_mcx_csr(pdata, ECM_CONFIG0_REG_0_ADDR, &count); } static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata) { u32 val = 0xffffffff; xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val); xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val); xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val); xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val); } static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, u32 dst_ring_num, u16 bufpool_id, u16 nxtbufpool_id) { u32 cb; u32 fpsel, nxtfpsel; fpsel = xgene_enet_get_fpsel(bufpool_id); nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id); xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); cb |= CFG_CLE_BYPASS_EN0; CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); CFG_CLE_IP_HDR_LEN_SET(&cb, 0); xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); CFG_CLE_DSTQID0_SET(&cb, dst_ring_num); CFG_CLE_FPSEL0_SET(&cb, fpsel); CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel); xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb); } static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata) { u32 data; data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR); xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN); } static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata) { u32 data; data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR); xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN); } static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata) { u32 data; data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR); xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN); } static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata) { u32 data; data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR); xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); } bool xgene_ring_mgr_init(struct xgene_enet_pdata *p) { if (!ioread32(p->ring_csr_addr + CLKEN_ADDR)) return false; if (ioread32(p->ring_csr_addr + SRST_ADDR)) return false; return true; } static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; if (!xgene_ring_mgr_init(pdata)) return -ENODEV; if (pdata->mdio_driver) { xgene_enet_config_ring_if_assoc(pdata); return 0; } if (dev->of_node) { clk_prepare_enable(pdata->clk); udelay(5); clk_disable_unprepare(pdata->clk); udelay(5); clk_prepare_enable(pdata->clk); udelay(5); } else { #ifdef CONFIG_ACPI acpi_status status; status = 
acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), "_RST", NULL, NULL); if (ACPI_FAILURE(status)) { acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), "_INI", NULL, NULL); } #endif } xgene_enet_ecc_init(pdata); xgene_enet_config_ring_if_assoc(pdata); return 0; } static void xgene_enet_clear(struct xgene_enet_pdata *pdata, struct xgene_enet_desc_ring *ring) { u32 addr, data; if (xgene_enet_is_bufpool(ring->id)) { addr = ENET_CFGSSQMIFPRESET_ADDR; data = BIT(xgene_enet_get_fpsel(ring->id)); } else { addr = ENET_CFGSSQMIWQRESET_ADDR; data = BIT(xgene_enet_ring_bufnum(ring->id)); } xgene_enet_wr_ring_if(pdata, addr, data); } static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; if (dev->of_node) { if (!IS_ERR(pdata->clk)) clk_disable_unprepare(pdata->clk); } } static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; u16 lcladv, rmtadv = 0; u32 rx_pause, tx_pause; u8 flowctl = 0; if (!phydev->duplex || !pdata->pause_autoneg) return 0; if (pdata->tx_pause) flowctl |= FLOW_CTRL_TX; if (pdata->rx_pause) flowctl |= FLOW_CTRL_RX; lcladv = mii_advertise_flowctrl(flowctl); if (phydev->pause) rmtadv = LPA_PAUSE_CAP; if (phydev->asym_pause) rmtadv |= LPA_PAUSE_ASYM; flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); tx_pause = !!(flowctl & FLOW_CTRL_TX); rx_pause = !!(flowctl & FLOW_CTRL_RX); if (tx_pause != pdata->tx_pause) { pdata->tx_pause = tx_pause; pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause); } if (rx_pause != pdata->rx_pause) { pdata->rx_pause = rx_pause; pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause); } return 0; } static void xgene_enet_adjust_link(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); const struct xgene_mac_ops *mac_ops = pdata->mac_ops; struct phy_device *phydev = ndev->phydev; if (phydev->link) { if (pdata->phy_speed != phydev->speed) { pdata->phy_speed = phydev->speed; mac_ops->set_speed(pdata); mac_ops->rx_enable(pdata); mac_ops->tx_enable(pdata); phy_print_status(phydev); } xgene_enet_flowctrl_cfg(ndev); } else { mac_ops->rx_disable(pdata); mac_ops->tx_disable(pdata); pdata->phy_speed = SPEED_UNKNOWN; phy_print_status(phydev); } } #ifdef CONFIG_ACPI static struct acpi_device *acpi_phy_find_device(struct device *dev) { struct fwnode_reference_args args; struct fwnode_handle *fw_node; int status; fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev)); status = acpi_node_get_property_reference(fw_node, "phy-handle", 0, &args); if (ACPI_FAILURE(status) || !is_acpi_device_node(args.fwnode)) { dev_dbg(dev, "No matching phy in ACPI table\n"); return NULL; } return to_acpi_device_node(args.fwnode); } #endif int xgene_enet_phy_connect(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct device_node *np; struct phy_device *phy_dev; struct device *dev = &pdata->pdev->dev; int i; if (dev->of_node) { for (i = 0 ; i < 2; i++) { np = of_parse_phandle(dev->of_node, "phy-handle", i); phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link, 0, pdata->phy_mode); of_node_put(np); if (phy_dev) break; } if (!phy_dev) { netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; } } else { #ifdef CONFIG_ACPI struct acpi_device *adev = acpi_phy_find_device(dev); if (adev) phy_dev = adev->driver_data; else phy_dev = NULL; if (!phy_dev || phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, pdata->phy_mode)) { netdev_err(ndev, "Could not connect to PHY\n"); return 
-ENODEV; } #else return -ENODEV; #endif } pdata->phy_speed = SPEED_UNKNOWN; phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); phy_support_asym_pause(phy_dev); return 0; } static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, struct mii_bus *mdio) { struct device *dev = &pdata->pdev->dev; struct net_device *ndev = pdata->ndev; struct phy_device *phy; struct device_node *child_np; struct device_node *mdio_np = NULL; u32 phy_addr; int ret; if (dev->of_node) { for_each_child_of_node(dev->of_node, child_np) { if (of_device_is_compatible(child_np, "apm,xgene-mdio")) { mdio_np = child_np; break; } } if (!mdio_np) { netdev_dbg(ndev, "No mdio node in the dts\n"); return -ENXIO; } return of_mdiobus_register(mdio, mdio_np); } /* Mask out all PHYs from auto probing. */ mdio->phy_mask = ~0; /* Register the MDIO bus */ ret = mdiobus_register(mdio); if (ret) return ret; ret = device_property_read_u32(dev, "phy-channel", &phy_addr); if (ret) ret = device_property_read_u32(dev, "phy-addr", &phy_addr); if (ret) return -EINVAL; phy = xgene_enet_phy_register(mdio, phy_addr); if (!phy) return -EIO; return ret; } int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) { struct net_device *ndev = pdata->ndev; struct mii_bus *mdio_bus; int ret; mdio_bus = mdiobus_alloc(); if (!mdio_bus) return -ENOMEM; mdio_bus->name = "APM X-Gene MDIO bus"; mdio_bus->read = xgene_mdio_rgmii_read; mdio_bus->write = xgene_mdio_rgmii_write; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii", ndev->name); mdio_bus->priv = (void __force *)pdata->mcx_mac_addr; mdio_bus->parent = &pdata->pdev->dev; ret = xgene_mdiobus_register(pdata, mdio_bus); if (ret) { netdev_err(ndev, "Failed to register MDIO bus\n"); mdiobus_free(mdio_bus); return ret; } pdata->mdio_bus = mdio_bus; ret = xgene_enet_phy_connect(ndev); if (ret) xgene_enet_mdio_remove(pdata); return ret; } void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata) { struct net_device *ndev = pdata->ndev; if (ndev->phydev) phy_disconnect(ndev->phydev); } void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) { struct net_device *ndev = pdata->ndev; if (ndev->phydev) phy_disconnect(ndev->phydev); mdiobus_unregister(pdata->mdio_bus); mdiobus_free(pdata->mdio_bus); pdata->mdio_bus = NULL; } const struct xgene_mac_ops xgene_gmac_ops = { .init = xgene_gmac_init, .reset = xgene_gmac_reset, .rx_enable = xgene_gmac_rx_enable, .tx_enable = xgene_gmac_tx_enable, .rx_disable = xgene_gmac_rx_disable, .tx_disable = xgene_gmac_tx_disable, .get_drop_cnt = xgene_gmac_get_drop_cnt, .set_speed = xgene_gmac_set_speed, .set_mac_addr = xgene_gmac_set_mac_addr, .set_framesize = xgene_enet_set_frame_size, .enable_tx_pause = xgene_gmac_enable_tx_pause, .flowctl_tx = xgene_gmac_flowctl_tx, .flowctl_rx = xgene_gmac_flowctl_rx, }; const struct xgene_port_ops xgene_gport_ops = { .reset = xgene_enet_reset, .clear = xgene_enet_clear, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_gport_shutdown, }; struct xgene_ring_ops xgene_ring1_ops = { .num_ring_config = NUM_RING_CONFIG, .num_ring_id_shift = 6, .setup = xgene_enet_setup_ring, .clear = xgene_enet_clear_ring, .wr_cmd = xgene_enet_wr_cmd, .len = xgene_enet_ring_len, };
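/*
 * Explanatory note (added; not part of the upstream driver): the exported
 * tables above (xgene_gmac_ops, xgene_gport_ops, xgene_ring1_ops) are plain
 * function-pointer vtables.  The core driver keeps a pointer to the chosen
 * set in pdata (pdata->mac_ops is dereferenced in xgene_enet_adjust_link()
 * and xgene_gmac_flowctl_tx() above) and calls through it, which is
 * presumably how the GMAC, SGMAC and XGMAC variants of this hardware end up
 * sharing one common data path in the main driver.
 */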
linux-master
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
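The xgene_gmac_set_mac_addr() helper in the file above packs the six MAC address bytes little-endian into two registers: bytes 0-3 into STATION_ADDR0 and bytes 4-5 into the upper half-word of STATION_ADDR1. A minimal, stand-alone C model of that packing -- illustrative only, with a hypothetical main() and no driver headers -- is sketched below:

#include <stdint.h>
#include <stdio.h>

/* Model of the STATION_ADDR0/1 packing done in xgene_gmac_set_mac_addr(). */
static void pack_station_addr(const uint8_t mac[6], uint32_t *addr0, uint32_t *addr1)
{
	*addr0 = ((uint32_t)mac[3] << 24) | ((uint32_t)mac[2] << 16) |
		 ((uint32_t)mac[1] << 8) | mac[0];
	*addr1 = ((uint32_t)mac[5] << 24) | ((uint32_t)mac[4] << 16);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t a0, a1;

	pack_station_addr(mac, &a0, &a1);
	printf("STATION_ADDR0 = 0x%08x\n", (unsigned)a0);	/* 0x33221100 */
	printf("STATION_ADDR1 = 0x%08x\n", (unsigned)a1);	/* 0x55440000 */
	return 0;
}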
// SPDX-License-Identifier: GPL-2.0-or-later /* Applied Micro X-Gene SoC Ethernet Driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Authors: Iyappan Subramanian <[email protected]> * Keyur Chudgar <[email protected]> */ #include <linux/of_gpio.h> #include <linux/gpio.h> #include "xgene_enet_main.h" #include "xgene_enet_hw.h" #include "xgene_enet_xgmac.h" static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 val) { void __iomem *addr = pdata->eth_csr_addr + offset; iowrite32(val, addr); } static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata, u32 offset, u32 val) { void __iomem *addr = pdata->eth_ring_if_addr + offset; iowrite32(val, addr); } static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 val) { void __iomem *addr = pdata->eth_diag_csr_addr + offset; iowrite32(val, addr); } static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr, void __iomem *cmd, void __iomem *cmd_done, u32 wr_addr, u32 wr_data) { u32 done; u8 wait = 10; iowrite32(wr_addr, addr); iowrite32(wr_data, wr); iowrite32(XGENE_ENET_WR_CMD, cmd); /* wait for write command to complete */ while (!(done = ioread32(cmd_done)) && wait--) udelay(1); if (!done) return false; iowrite32(0, cmd); return true; } static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data) { void __iomem *addr, *wr, *cmd, *cmd_done; addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET; wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET; cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET; cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET; if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n", wr_addr); } static void xgene_enet_wr_axg_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 val) { void __iomem *addr = pdata->mcx_mac_csr_addr + offset; iowrite32(val, addr); } static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 *val) { void __iomem *addr = pdata->eth_csr_addr + offset; *val = ioread32(addr); } static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 *val) { void __iomem *addr = pdata->eth_diag_csr_addr + offset; *val = ioread32(addr); } static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd, void __iomem *cmd, void __iomem *cmd_done, u32 rd_addr, u32 *rd_data) { u32 done; u8 wait = 10; iowrite32(rd_addr, addr); iowrite32(XGENE_ENET_RD_CMD, cmd); /* wait for read command to complete */ while (!(done = ioread32(cmd_done)) && wait--) udelay(1); if (!done) return false; *rd_data = ioread32(rd); iowrite32(0, cmd); return true; } static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata, u32 rd_addr, u32 *rd_data) { void __iomem *addr, *rd, *cmd, *cmd_done; bool success; addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET; rd = pdata->pcs_addr + PCS_READ_REG_OFFSET; cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET; cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET; success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data); if (!success) netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n", rd_addr); return success; } static void xgene_enet_rd_axg_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 *val) { void __iomem *addr = pdata->mcx_mac_csr_addr + offset; *val = ioread32(addr); } static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) { struct net_device *ndev = pdata->ndev; u32 data; u8 wait = 10; xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); 
do { usleep_range(100, 110); xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data); } while ((data != 0xffffffff) && wait--); if (data != 0xffffffff) { netdev_err(ndev, "Failed to release memory from shutdown\n"); return -ENODEV; } return 0; } static void xgene_xgmac_get_drop_cnt(struct xgene_enet_pdata *pdata, u32 *rx, u32 *tx) { u32 count; xgene_enet_rd_axg_csr(pdata, XGENET_ICM_ECM_DROP_COUNT_REG0, &count); *rx = ICM_DROP_COUNT(count); *tx = ECM_DROP_COUNT(count); /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */ xgene_enet_rd_axg_csr(pdata, XGENET_ECM_CONFIG0_REG_0, &count); } static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata) { xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0); xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0); xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0); xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0); } static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata) { xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST); xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0); } static void xgene_pcs_reset(struct xgene_enet_pdata *pdata) { u32 data; if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, &data)) return; xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST); xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST); } static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata) { const u8 *dev_addr = pdata->ndev->dev_addr; u32 addr0, addr1; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0); xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1); } static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata, u16 mss, u8 index) { u8 offset; u32 data; offset = (index < 2) ? 
0 : 4; xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data); if (!(index & 0x1)) data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) | SET_VAL(TSO_MSS0, mss); else data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data); xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data); } static void xgene_xgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size) { xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, ((((size + 2) >> 2) << 16) | size)); } static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata) { u32 data; xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data); return data; } static void xgene_xgmac_enable_tx_pause(struct xgene_enet_pdata *pdata, bool enable) { u32 data; xgene_enet_rd_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, &data); if (enable) data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN; else data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN); xgene_enet_wr_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, data); } static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable) { u32 data; data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1); if (enable) data |= HSTTCTLEN; else data &= ~HSTTCTLEN; xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data); pdata->mac_ops->enable_tx_pause(pdata, enable); } static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable) { u32 data; data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1); if (enable) data |= HSTRCTLEN; else data &= ~HSTRCTLEN; xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data); } static void xgene_xgmac_init(struct xgene_enet_pdata *pdata) { u32 data; xgene_xgmac_reset(pdata); data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1); data |= HSTPPEN; data &= ~HSTLENCHK; xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data); xgene_xgmac_set_mac_addr(pdata); xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data); data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; /* Errata 10GE_1 - FIFO threshold default value incorrect */ RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH); xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data); /* Errata 10GE_1 - FIFO threshold default value incorrect */ xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data); RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH); xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data); xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data); data |= BIT(12); xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data); xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82); xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0); xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX); /* Configure HW pause frame generation */ xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, &data); data = (DEF_QUANTA << 16) | (data & 0xFFFF); xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, data); if (pdata->enet_id != XGENE_ENET1) { xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, &data); data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF); xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, data); } data = (XG_DEF_PAUSE_OFF_THRES << 16) | XG_DEF_PAUSE_THRES; xgene_enet_wr_csr(pdata, XG_RXBUF_PAUSE_THRESH, data); xgene_xgmac_flowctl_tx(pdata, pdata->tx_pause); xgene_xgmac_flowctl_rx(pdata, pdata->rx_pause); } static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata) { u32 data; data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1); xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN); } static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata) { u32 data; data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1); 
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN); } static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata) { u32 data; data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1); xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN); } static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata) { u32 data; data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1); xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN); } static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; if (!xgene_ring_mgr_init(pdata)) return -ENODEV; if (dev->of_node) { clk_prepare_enable(pdata->clk); udelay(5); clk_disable_unprepare(pdata->clk); udelay(5); clk_prepare_enable(pdata->clk); udelay(5); } else { #ifdef CONFIG_ACPI acpi_status status; status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), "_RST", NULL, NULL); if (ACPI_FAILURE(status)) { acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), "_INI", NULL, NULL); } #endif } xgene_enet_ecc_init(pdata); xgene_enet_config_ring_if_assoc(pdata); return 0; } static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata, u32 dst_ring_num, u16 bufpool_id, u16 nxtbufpool_id) { u32 cb, fpsel, nxtfpsel; xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb); cb |= CFG_CLE_BYPASS_EN0; CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb); fpsel = xgene_enet_get_fpsel(bufpool_id); nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id); xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb); CFG_CLE_DSTQID0_SET(&cb, dst_ring_num); CFG_CLE_FPSEL0_SET(&cb, fpsel); CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel); xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb); pr_info("+ cle_bypass: fpsel: %d nxtfpsel: %d\n", fpsel, nxtfpsel); } static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; if (dev->of_node) { if (!IS_ERR(pdata->clk)) clk_disable_unprepare(pdata->clk); } } static void xgene_enet_clear(struct xgene_enet_pdata *pdata, struct xgene_enet_desc_ring *ring) { u32 addr, data; if (xgene_enet_is_bufpool(ring->id)) { addr = ENET_CFGSSQMIFPRESET_ADDR; data = BIT(xgene_enet_get_fpsel(ring->id)); } else { addr = ENET_CFGSSQMIWQRESET_ADDR; data = BIT(xgene_enet_ring_bufnum(ring->id)); } xgene_enet_wr_ring_if(pdata, addr, data); } static int xgene_enet_gpio_lookup(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN); if (IS_ERR(pdata->sfp_rdy)) pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN); if (IS_ERR(pdata->sfp_rdy)) return -ENODEV; return 0; } static void xgene_enet_link_state(struct work_struct *work) { struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work), struct xgene_enet_pdata, link_work); struct net_device *ndev = pdata->ndev; u32 link_status, poll_interval; link_status = xgene_enet_link_status(pdata); if (pdata->sfp_gpio_en && link_status && (!IS_ERR(pdata->sfp_rdy) || !xgene_enet_gpio_lookup(pdata)) && !gpiod_get_value(pdata->sfp_rdy)) link_status = 0; if (link_status) { if (!netif_carrier_ok(ndev)) { netif_carrier_on(ndev); xgene_xgmac_rx_enable(pdata); xgene_xgmac_tx_enable(pdata); netdev_info(ndev, "Link is Up - 10Gbps\n"); } poll_interval = PHY_POLL_LINK_ON; } else { if (netif_carrier_ok(ndev)) { xgene_xgmac_rx_disable(pdata); xgene_xgmac_tx_disable(pdata); netif_carrier_off(ndev); netdev_info(ndev, "Link is Down\n"); } poll_interval = PHY_POLL_LINK_OFF; xgene_pcs_reset(pdata); } schedule_delayed_work(&pdata->link_work, poll_interval); } 
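/*
 * Explanatory note (added; not part of the upstream driver): the 10G port
 * has no attached phylib PHY, so link state is polled from delayed work.
 * xgene_enet_link_state() above reads XG_LINK_STATUS_ADDR, optionally gates
 * the result on the SFP "rxlos" GPIO, toggles the netif carrier and the MAC
 * rx/tx enables accordingly, resets the PCS while the link stays down, and
 * then reschedules itself at PHY_POLL_LINK_ON or PHY_POLL_LINK_OFF.
 */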
const struct xgene_mac_ops xgene_xgmac_ops = { .init = xgene_xgmac_init, .reset = xgene_xgmac_reset, .rx_enable = xgene_xgmac_rx_enable, .tx_enable = xgene_xgmac_tx_enable, .rx_disable = xgene_xgmac_rx_disable, .tx_disable = xgene_xgmac_tx_disable, .set_mac_addr = xgene_xgmac_set_mac_addr, .set_framesize = xgene_xgmac_set_frame_size, .set_mss = xgene_xgmac_set_mss, .get_drop_cnt = xgene_xgmac_get_drop_cnt, .link_state = xgene_enet_link_state, .enable_tx_pause = xgene_xgmac_enable_tx_pause, .flowctl_rx = xgene_xgmac_flowctl_rx, .flowctl_tx = xgene_xgmac_flowctl_tx }; const struct xgene_port_ops xgene_xgport_ops = { .reset = xgene_enet_reset, .clear = xgene_enet_clear, .cle_bypass = xgene_enet_xgcle_bypass, .shutdown = xgene_enet_shutdown, };
linux-master
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
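xgene_xgmac_set_frame_size() in the file above programs HSTMAXFRAME_LENGTH with the frame size in the low half-word and the derived value ((size + 2) >> 2) in the high half-word. A small stand-alone C model of that encoding -- hypothetical main(), no driver headers, purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Model of the HSTMAXFRAME_LENGTH encoding in xgene_xgmac_set_frame_size(). */
static uint32_t xg_max_frame_reg(int size)
{
	return ((((uint32_t)size + 2) >> 2) << 16) | (uint32_t)size;
}

int main(void)
{
	/* 1522 bytes (1500-byte MTU plus header/FCS overhead) as an arbitrary example. */
	int size = 1522;

	printf("HSTMAXFRAME_LENGTH = 0x%08x\n", (unsigned)xg_max_frame_reg(size));	/* 0x017d05f2 */
	return 0;
}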
// SPDX-License-Identifier: GPL-2.0-or-later /* Applied Micro X-Gene SoC Ethernet Classifier structures * * Copyright (c) 2016, Applied Micro Circuits Corporation * Authors: Khuong Dinh <[email protected]> * Tanmay Inamdar <[email protected]> * Iyappan Subramanian <[email protected]> */ #include "xgene_enet_main.h" /* interfaces to convert structures to HW recognized bit formats */ static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver, enum xgene_cle_prot_type type, u32 len, u32 *reg) { *reg = SET_VAL(SB_IPFRAG, frag) | SET_VAL(SB_IPPROT, type) | SET_VAL(SB_IPVER, ver) | SET_VAL(SB_HDRLEN, len); } static void xgene_cle_idt_to_hw(struct xgene_enet_pdata *pdata, u32 dstqid, u32 fpsel, u32 nfpsel, u32 *idt_reg) { if (pdata->enet_id == XGENE_ENET1) { *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | SET_VAL(IDT_FPSEL1, fpsel) | SET_VAL(IDT_NFPSEL1, nfpsel); } else { *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | SET_VAL(IDT_FPSEL, fpsel) | SET_VAL(IDT_NFPSEL, nfpsel); } } static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata, struct xgene_cle_dbptr *dbptr, u32 *buf) { buf[0] = SET_VAL(CLE_DROP, dbptr->drop); buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) | SET_VAL(CLE_NFPSEL, dbptr->nxtfpsel) | SET_VAL(CLE_DSTQIDL, dbptr->dstqid); buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) | SET_VAL(CLE_PRIORITY, dbptr->cle_priority); } static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf) { u32 i, j = 0; u32 data; buf[j++] = SET_VAL(CLE_TYPE, kn->node_type); for (i = 0; i < kn->num_keys; i++) { struct xgene_cle_ptree_key *key = &kn->key[i]; if (!(i % 2)) { buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) | SET_VAL(CLE_KN_RPTR, key->result_pointer); } else { data = SET_VAL(CLE_KN_PRIO, key->priority) | SET_VAL(CLE_KN_RPTR, key->result_pointer); buf[j++] |= (data << 16); } } } static void xgene_cle_dn_to_hw(const struct xgene_cle_ptree_ewdn *dn, u32 *buf, u32 jb) { const struct xgene_cle_ptree_branch *br; u32 i, j = 0; u32 npp; buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) | SET_VAL(CLE_DN_LASTN, dn->last_node) | SET_VAL(CLE_DN_HLS, dn->hdr_len_store) | SET_VAL(CLE_DN_EXT, dn->hdr_extn) | SET_VAL(CLE_DN_BSTOR, dn->byte_store) | SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) | SET_VAL(CLE_DN_RPTR, dn->result_pointer); for (i = 0; i < dn->num_branches; i++) { br = &dn->branch[i]; npp = br->next_packet_pointer; if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE)) npp += jb; buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) | SET_VAL(CLE_BR_NPPTR, npp) | SET_VAL(CLE_BR_JB, br->jump_bw) | SET_VAL(CLE_BR_JR, br->jump_rel) | SET_VAL(CLE_BR_OP, br->operation) | SET_VAL(CLE_BR_NNODE, br->next_node) | SET_VAL(CLE_BR_NBR, br->next_branch); buf[j++] = SET_VAL(CLE_BR_DATA, br->data) | SET_VAL(CLE_BR_MASK, br->mask); } } static int xgene_cle_poll_cmd_done(void __iomem *base, enum xgene_cle_cmd_type cmd) { u32 status, loop = 10; int ret = -EBUSY; while (loop--) { status = ioread32(base + INDCMD_STATUS); if (status & cmd) { ret = 0; break; } usleep_range(1000, 2000); } return ret; } static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs, u32 index, enum xgene_cle_dram_type type, enum xgene_cle_cmd_type cmd) { enum xgene_cle_parser parser = cle->active_parser; void __iomem *base = cle->base; u32 i, j, ind_addr; u8 port, nparsers; int ret = 0; /* PTREE_RAM onwards, DRAM regions are common for all parsers */ nparsers = (type >= PTREE_RAM) ? 
1 : cle->parsers; for (i = 0; i < nparsers; i++) { port = i; if ((type < PTREE_RAM) && (parser != PARSER_ALL)) port = parser; ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index; iowrite32(ind_addr, base + INDADDR); for (j = 0; j < nregs; j++) iowrite32(data[j], base + DATA_RAM0 + (j * 4)); iowrite32(cmd, base + INDCMD); ret = xgene_cle_poll_cmd_done(base, cmd); if (ret) break; } return ret; } static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata, struct xgene_enet_cle *cle) { struct xgene_cle_ptree *ptree = &cle->ptree; void __iomem *addr, *base = cle->base; u32 offset = CLE_PORT_OFFSET; u32 i; /* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */ ptree->start_pkt += cle->jump_bytes; for (i = 0; i < cle->parsers; i++) { if (cle->active_parser != PARSER_ALL) addr = base + cle->active_parser * offset; else addr = base + (i * offset); iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0); iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0); } } static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata, struct xgene_enet_cle *cle) { struct xgene_cle_ptree *ptree = &cle->ptree; u32 buf[CLE_DRAM_REGS]; u32 i; int ret; memset(buf, 0, sizeof(buf)); for (i = 0; i < ptree->num_dbptr; i++) { xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf); ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr, DB_RAM, CLE_CMD_WR); if (ret) return ret; } return 0; } static const struct xgene_cle_ptree_ewdn xgene_init_ptree_dn[] = { { /* PKT_TYPE_NODE */ .node_type = EWDN, .last_node = 0, .hdr_len_store = 1, .hdr_extn = NO_BYTE, .byte_store = NO_BYTE, .search_byte_store = NO_BYTE, .result_pointer = DB_RES_DROP, .num_branches = 2, .branch = { { /* IPV4 */ .valid = 1, .next_packet_pointer = 22, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = PKT_PROT_NODE, .next_branch = 0, .data = 0x8, .mask = 0x0 }, { .valid = 0, .next_packet_pointer = 262, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = LAST_NODE, .next_branch = 0, .data = 0x0, .mask = 0xffff } }, }, { /* PKT_PROT_NODE */ .node_type = EWDN, .last_node = 0, .hdr_len_store = 1, .hdr_extn = NO_BYTE, .byte_store = NO_BYTE, .search_byte_store = NO_BYTE, .result_pointer = DB_RES_DROP, .num_branches = 3, .branch = { { /* TCP */ .valid = 1, .next_packet_pointer = 26, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_TCP_NODE, .next_branch = 0, .data = 0x0600, .mask = 0x00ff }, { /* UDP */ .valid = 1, .next_packet_pointer = 26, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_UDP_NODE, .next_branch = 0, .data = 0x1100, .mask = 0x00ff }, { .valid = 0, .next_packet_pointer = 26, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_OTHERS_NODE, .next_branch = 0, .data = 0x0, .mask = 0xffff } } }, { /* RSS_IPV4_TCP_NODE */ .node_type = EWDN, .last_node = 0, .hdr_len_store = 1, .hdr_extn = NO_BYTE, .byte_store = NO_BYTE, .search_byte_store = BOTH_BYTES, .result_pointer = DB_RES_DROP, .num_branches = 6, .branch = { { /* SRC IPV4 B01 */ .valid = 0, .next_packet_pointer = 28, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_TCP_NODE, .next_branch = 1, .data = 0x0, .mask = 0xffff }, { /* SRC IPV4 B23 */ .valid = 0, .next_packet_pointer = 30, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_TCP_NODE, .next_branch = 2, .data = 0x0, .mask = 0xffff }, { /* DST IPV4 B01 */ .valid = 0, .next_packet_pointer = 32, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, 
.operation = EQT, .next_node = RSS_IPV4_TCP_NODE, .next_branch = 3, .data = 0x0, .mask = 0xffff }, { /* DST IPV4 B23 */ .valid = 0, .next_packet_pointer = 34, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_TCP_NODE, .next_branch = 4, .data = 0x0, .mask = 0xffff }, { /* TCP SRC Port */ .valid = 0, .next_packet_pointer = 36, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_TCP_NODE, .next_branch = 5, .data = 0x0, .mask = 0xffff }, { /* TCP DST Port */ .valid = 0, .next_packet_pointer = 256, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = LAST_NODE, .next_branch = 0, .data = 0x0, .mask = 0xffff } } }, { /* RSS_IPV4_UDP_NODE */ .node_type = EWDN, .last_node = 0, .hdr_len_store = 1, .hdr_extn = NO_BYTE, .byte_store = NO_BYTE, .search_byte_store = BOTH_BYTES, .result_pointer = DB_RES_DROP, .num_branches = 6, .branch = { { /* SRC IPV4 B01 */ .valid = 0, .next_packet_pointer = 28, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_UDP_NODE, .next_branch = 1, .data = 0x0, .mask = 0xffff }, { /* SRC IPV4 B23 */ .valid = 0, .next_packet_pointer = 30, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_UDP_NODE, .next_branch = 2, .data = 0x0, .mask = 0xffff }, { /* DST IPV4 B01 */ .valid = 0, .next_packet_pointer = 32, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_UDP_NODE, .next_branch = 3, .data = 0x0, .mask = 0xffff }, { /* DST IPV4 B23 */ .valid = 0, .next_packet_pointer = 34, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_UDP_NODE, .next_branch = 4, .data = 0x0, .mask = 0xffff }, { /* TCP SRC Port */ .valid = 0, .next_packet_pointer = 36, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_UDP_NODE, .next_branch = 5, .data = 0x0, .mask = 0xffff }, { /* TCP DST Port */ .valid = 0, .next_packet_pointer = 258, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = LAST_NODE, .next_branch = 0, .data = 0x0, .mask = 0xffff } } }, { /* RSS_IPV4_OTHERS_NODE */ .node_type = EWDN, .last_node = 0, .hdr_len_store = 1, .hdr_extn = NO_BYTE, .byte_store = NO_BYTE, .search_byte_store = BOTH_BYTES, .result_pointer = DB_RES_DROP, .num_branches = 6, .branch = { { /* SRC IPV4 B01 */ .valid = 0, .next_packet_pointer = 28, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_OTHERS_NODE, .next_branch = 1, .data = 0x0, .mask = 0xffff }, { /* SRC IPV4 B23 */ .valid = 0, .next_packet_pointer = 30, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_OTHERS_NODE, .next_branch = 2, .data = 0x0, .mask = 0xffff }, { /* DST IPV4 B01 */ .valid = 0, .next_packet_pointer = 32, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_OTHERS_NODE, .next_branch = 3, .data = 0x0, .mask = 0xffff }, { /* DST IPV4 B23 */ .valid = 0, .next_packet_pointer = 34, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_OTHERS_NODE, .next_branch = 4, .data = 0x0, .mask = 0xffff }, { /* TCP SRC Port */ .valid = 0, .next_packet_pointer = 36, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = RSS_IPV4_OTHERS_NODE, .next_branch = 5, .data = 0x0, .mask = 0xffff }, { /* TCP DST Port */ .valid = 0, .next_packet_pointer = 260, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = LAST_NODE, .next_branch = 0, .data = 0x0, .mask = 0xffff } } }, { /* LAST NODE */ 
.node_type = EWDN, .last_node = 1, .hdr_len_store = 1, .hdr_extn = NO_BYTE, .byte_store = NO_BYTE, .search_byte_store = NO_BYTE, .result_pointer = DB_RES_DROP, .num_branches = 1, .branch = { { .valid = 0, .next_packet_pointer = 0, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, .next_node = MAX_NODES, .next_branch = 0, .data = 0, .mask = 0xffff } } } }; static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata, struct xgene_enet_cle *cle) { struct xgene_cle_ptree *ptree = &cle->ptree; const struct xgene_cle_ptree_ewdn *dn = xgene_init_ptree_dn; int num_dn = ARRAY_SIZE(xgene_init_ptree_dn); struct xgene_cle_ptree_kn *kn = ptree->kn; u32 buf[CLE_DRAM_REGS]; int i, j, ret; memset(buf, 0, sizeof(buf)); for (i = 0; i < num_dn; i++) { xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes); ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node, PTREE_RAM, CLE_CMD_WR); if (ret) return ret; } /* continue node index for key node */ memset(buf, 0, sizeof(buf)); for (j = i; j < (ptree->num_kn + num_dn); j++) { xgene_cle_kn_to_hw(&kn[j - num_dn], buf); ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node, PTREE_RAM, CLE_CMD_WR); if (ret) return ret; } return 0; } static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata, struct xgene_enet_cle *cle) { int ret; ret = xgene_cle_setup_node(pdata, cle); if (ret) return ret; ret = xgene_cle_setup_dbptr(pdata, cle); if (ret) return ret; xgene_cle_enable_ptree(pdata, cle); return 0; } static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata, struct xgene_enet_cle *enet_cle, struct xgene_cle_dbptr *dbptr, u32 index, u8 priority) { void __iomem *base = enet_cle->base; void __iomem *base_addr; u32 buf[CLE_DRAM_REGS]; u32 def_cls, offset; u32 i, j; memset(buf, 0, sizeof(buf)); xgene_cle_dbptr_to_hw(pdata, dbptr, buf); for (i = 0; i < enet_cle->parsers; i++) { if (enet_cle->active_parser != PARSER_ALL) { offset = enet_cle->active_parser * CLE_PORT_OFFSET; } else { offset = i * CLE_PORT_OFFSET; } base_addr = base + DFCLSRESDB00 + offset; for (j = 0; j < 6; j++) iowrite32(buf[j], base_addr + (j * 4)); def_cls = ((priority & 0x7) << 10) | (index & 0x3ff); iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset); } } static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle) { u32 idx = CLE_PKTRAM_SIZE / sizeof(u32); u32 mac_hdr_len = ETH_HLEN; u32 sband, reg = 0; u32 ipv4_ihl = 5; u32 hdr_len; int ret; /* Sideband: IPV4/TCP packets */ hdr_len = (mac_hdr_len << 5) | ipv4_ihl; xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg); sband = reg; /* Sideband: IPv4/UDP packets */ hdr_len = (mac_hdr_len << 5) | ipv4_ihl; xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg); sband |= (reg << 16); ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR); if (ret) return ret; /* Sideband: IPv4/RAW packets */ hdr_len = (mac_hdr_len << 5) | ipv4_ihl; xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER, hdr_len, &reg); sband = reg; /* Sideband: Ethernet II/RAW packets */ hdr_len = (mac_hdr_len << 5); xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER, hdr_len, &reg); sband |= (reg << 16); ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR); if (ret) return ret; return 0; } static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle) { u32 secret_key_ipv4[4]; /* 16 Bytes*/ int ret = 0; get_random_bytes(secret_key_ipv4, 16); ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0, RSS_IPV4_HASH_SKEY, CLE_CMD_WR); return ret; } static int xgene_cle_set_rss_idt(struct 
xgene_enet_pdata *pdata) { u32 fpsel, dstqid, nfpsel, idt_reg, idx; int i, ret = 0; u16 pool_id; for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) { idx = i % pdata->rxq_cnt; pool_id = pdata->rx_ring[idx]->buf_pool->id; fpsel = xgene_enet_get_fpsel(pool_id); dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]); nfpsel = 0; if (pdata->rx_ring[idx]->page_pool) { pool_id = pdata->rx_ring[idx]->page_pool->id; nfpsel = xgene_enet_get_fpsel(pool_id); } idt_reg = 0; xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg); ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i, RSS_IDT, CLE_CMD_WR); if (ret) return ret; } ret = xgene_cle_set_rss_skeys(&pdata->cle); if (ret) return ret; return 0; } static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata) { struct xgene_enet_cle *cle = &pdata->cle; void __iomem *base = cle->base; u32 offset, val = 0; int i, ret = 0; offset = CLE_PORT_OFFSET; for (i = 0; i < cle->parsers; i++) { if (cle->active_parser != PARSER_ALL) offset = cle->active_parser * CLE_PORT_OFFSET; else offset = i * CLE_PORT_OFFSET; /* enable RSS */ val = (RSS_IPV4_12B << 1) | 0x1; writel(val, base + RSS_CTRL0 + offset); } /* setup sideband data */ ret = xgene_cle_set_rss_sband(cle); if (ret) return ret; /* setup indirection table */ ret = xgene_cle_set_rss_idt(pdata); if (ret) return ret; return 0; } static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) { struct xgene_enet_cle *enet_cle = &pdata->cle; u32 def_qid, def_fpsel, def_nxtfpsel, pool_id; struct xgene_cle_dbptr dbptr[DB_MAX_PTRS]; struct xgene_cle_ptree *ptree; struct xgene_cle_ptree_kn kn; int ret; if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) return -EINVAL; ptree = &enet_cle->ptree; ptree->start_pkt = 12; /* Ethertype */ ret = xgene_cle_setup_rss(pdata); if (ret) { netdev_err(pdata->ndev, "RSS initialization failed\n"); return ret; } def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]); pool_id = pdata->rx_ring[0]->buf_pool->id; def_fpsel = xgene_enet_get_fpsel(pool_id); def_nxtfpsel = 0; if (pdata->rx_ring[0]->page_pool) { pool_id = pdata->rx_ring[0]->page_pool->id; def_nxtfpsel = xgene_enet_get_fpsel(pool_id); } memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS); dbptr[DB_RES_ACCEPT].fpsel = def_fpsel; dbptr[DB_RES_ACCEPT].nxtfpsel = def_nxtfpsel; dbptr[DB_RES_ACCEPT].dstqid = def_qid; dbptr[DB_RES_ACCEPT].cle_priority = 1; dbptr[DB_RES_DEF].fpsel = def_fpsel; dbptr[DB_RES_DEF].nxtfpsel = def_nxtfpsel; dbptr[DB_RES_DEF].dstqid = def_qid; dbptr[DB_RES_DEF].cle_priority = 7; xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF], DB_RES_ACCEPT, 7); dbptr[DB_RES_DROP].drop = 1; memset(&kn, 0, sizeof(kn)); kn.node_type = KN; kn.num_keys = 1; kn.key[0].priority = 0; kn.key[0].result_pointer = DB_RES_ACCEPT; ptree->kn = &kn; ptree->dbptr = dbptr; ptree->num_kn = 1; ptree->num_dbptr = DB_MAX_PTRS; return xgene_cle_setup_ptree(pdata, enet_cle); } const struct xgene_cle_ops xgene_cle3in_ops = { .cle_init = xgene_enet_cle_init, };
linux-master
drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
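xgene_cle_set_rss_idt() in the file above fills every RSS indirection-table slot round-robin across the receive rings (idx = i % pdata->rxq_cnt), so hash buckets spread evenly over the queues. A toy, stand-alone C illustration of that distribution follows; the entry and queue counts are illustrative constants, not the values of XGENE_CLE_IDT_ENTRIES or rxq_cnt:

#include <stdio.h>

/* Hypothetical sizes for illustration only; the driver uses
 * XGENE_CLE_IDT_ENTRIES and pdata->rxq_cnt. */
#define IDT_ENTRIES	16
#define RXQ_CNT		4

int main(void)
{
	int i;

	/* Same round-robin mapping as xgene_cle_set_rss_idt(): slot i -> ring (i % RXQ_CNT). */
	for (i = 0; i < IDT_ENTRIES; i++)
		printf("IDT[%2d] -> rx ring %d\n", i, i % RXQ_CNT);
	return 0;
}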
// SPDX-License-Identifier: GPL-2.0-or-later /* Applied Micro X-Gene SoC Ethernet Driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Authors: Iyappan Subramanian <[email protected]> */ #include <linux/ethtool.h> #include "xgene_enet_main.h" struct xgene_gstrings_stats { char name[ETH_GSTRING_LEN]; int offset; u32 addr; u32 mask; }; #define XGENE_STAT(m) { #m, offsetof(struct rtnl_link_stats64, m) } #define XGENE_EXTD_STAT(s, a, m) \ { \ .name = #s, \ .addr = a ## _ADDR, \ .mask = m \ } static const struct xgene_gstrings_stats gstrings_stats[] = { XGENE_STAT(rx_packets), XGENE_STAT(tx_packets), XGENE_STAT(rx_bytes), XGENE_STAT(tx_bytes), XGENE_STAT(rx_errors), XGENE_STAT(tx_errors), XGENE_STAT(rx_length_errors), XGENE_STAT(rx_crc_errors), XGENE_STAT(rx_frame_errors), XGENE_STAT(rx_fifo_errors) }; static const struct xgene_gstrings_stats gstrings_extd_stats[] = { XGENE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64, 31), XGENE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127, 31), XGENE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255, 31), XGENE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511, 31), XGENE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K, 31), XGENE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX, 31), XGENE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV, 31), XGENE_EXTD_STAT(rx_fcs_error_cntr, RFCS, 16), XGENE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA, 31), XGENE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA, 31), XGENE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF, 16), XGENE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF, 16), XGENE_EXTD_STAT(rx_unk_opcode_cntr, RXUO, 16), XGENE_EXTD_STAT(rx_align_err_cntr, RALN, 16), XGENE_EXTD_STAT(rx_frame_len_err_cntr, RFLR, 16), XGENE_EXTD_STAT(rx_frame_len_err_recov_cntr, DUMP, 0), XGENE_EXTD_STAT(rx_code_err_cntr, RCDE, 16), XGENE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE, 16), XGENE_EXTD_STAT(rx_undersize_pkt_cntr, RUND, 16), XGENE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR, 16), XGENE_EXTD_STAT(rx_fragments_cntr, RFRG, 16), XGENE_EXTD_STAT(rx_jabber_cntr, RJBR, 16), XGENE_EXTD_STAT(rx_jabber_recov_cntr, DUMP, 0), XGENE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP, 16), XGENE_EXTD_STAT(rx_overrun_cntr, DUMP, 0), XGENE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA, 31), XGENE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA, 31), XGENE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF, 16), XGENE_EXTD_STAT(tx_defer_pkt_cntr, TDFR, 31), XGENE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF, 31), XGENE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL, 31), XGENE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL, 31), XGENE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL, 31), XGENE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL, 31), XGENE_EXTD_STAT(tx_total_col_cntr, TNCL, 31), XGENE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH, 16), XGENE_EXTD_STAT(tx_drop_frame_cntr, TDRP, 16), XGENE_EXTD_STAT(tx_jabber_frame_cntr, TJBR, 12), XGENE_EXTD_STAT(tx_fcs_error_cntr, TFCS, 12), XGENE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF, 12), XGENE_EXTD_STAT(tx_oversize_frame_cntr, TOVR, 12), XGENE_EXTD_STAT(tx_undersize_frame_cntr, TUND, 12), XGENE_EXTD_STAT(tx_fragments_cntr, TFRG, 12), XGENE_EXTD_STAT(tx_underrun_cntr, DUMP, 0) }; #define XGENE_STATS_LEN ARRAY_SIZE(gstrings_stats) #define XGENE_EXTD_STATS_LEN ARRAY_SIZE(gstrings_extd_stats) #define RFCS_IDX 7 #define RALN_IDX 13 #define RFLR_IDX 14 #define FALSE_RFLR_IDX 15 #define RUND_IDX 18 #define FALSE_RJBR_IDX 22 #define RX_OVERRUN_IDX 24 #define TFCS_IDX 38 #define TFRG_IDX 42 #define TX_UNDERRUN_IDX 43 static void xgene_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct platform_device 
*pdev = pdata->pdev; strcpy(info->driver, "xgene_enet"); sprintf(info->bus_info, "%s", pdev->name); } static int xgene_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; u32 supported; if (phy_interface_mode_is_rgmii(pdata->phy_mode)) { if (phydev == NULL) return -ENODEV; phy_ethtool_ksettings_get(phydev, cmd); return 0; } else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { if (pdata->mdio_driver) { if (!phydev) return -ENODEV; phy_ethtool_ksettings_get(phydev, cmd); return 0; } supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII; ethtool_convert_legacy_u32_to_link_mode( cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode( cmd->link_modes.advertising, supported); cmd->base.speed = SPEED_1000; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_MII; cmd->base.autoneg = AUTONEG_ENABLE; } else { supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE; ethtool_convert_legacy_u32_to_link_mode( cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode( cmd->link_modes.advertising, supported); cmd->base.speed = SPEED_10000; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_FIBRE; cmd->base.autoneg = AUTONEG_DISABLE; } return 0; } static int xgene_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; if (phy_interface_mode_is_rgmii(pdata->phy_mode)) { if (!phydev) return -ENODEV; return phy_ethtool_ksettings_set(phydev, cmd); } if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { if (pdata->mdio_driver) { if (!phydev) return -ENODEV; return phy_ethtool_ksettings_set(phydev, cmd); } } return -EINVAL; } static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { int i; u8 *p = data; if (stringset != ETH_SS_STATS) return; for (i = 0; i < XGENE_STATS_LEN; i++) { memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) { memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } } static int xgene_get_sset_count(struct net_device *ndev, int sset) { if (sset != ETH_SS_STATS) return -EINVAL; return XGENE_STATS_LEN + XGENE_EXTD_STATS_LEN; } static void xgene_get_extd_stats(struct xgene_enet_pdata *pdata) { u32 rx_drop, tx_drop; u32 mask, tmp; int i; for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) { tmp = xgene_enet_rd_stat(pdata, gstrings_extd_stats[i].addr); if (gstrings_extd_stats[i].mask) { mask = GENMASK(gstrings_extd_stats[i].mask - 1, 0); pdata->extd_stats[i] += (tmp & mask); } } if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { /* Errata 10GE_10 - SW should intepret RALN as 0 */ pdata->extd_stats[RALN_IDX] = 0; } else { /* Errata ENET_15 - Fixes RFCS, RFLR, TFCS counter */ pdata->extd_stats[RFCS_IDX] -= pdata->extd_stats[RALN_IDX]; pdata->extd_stats[RFLR_IDX] -= pdata->extd_stats[RUND_IDX]; pdata->extd_stats[TFCS_IDX] -= pdata->extd_stats[TFRG_IDX]; } pdata->mac_ops->get_drop_cnt(pdata, &rx_drop, &tx_drop); pdata->extd_stats[RX_OVERRUN_IDX] += rx_drop; pdata->extd_stats[TX_UNDERRUN_IDX] += tx_drop; /* Errata 10GE_8 - Update Frame recovered from Errata 10GE_8/ENET_11 */ pdata->extd_stats[FALSE_RFLR_IDX] = pdata->false_rflr; /* Errata ENET_15 - Jabber Frame recov'ed from Errata 10GE_10/ENET_15 */ pdata->extd_stats[FALSE_RJBR_IDX] = pdata->vlan_rjbr; } int 
xgene_extd_stats_init(struct xgene_enet_pdata *pdata) { pdata->extd_stats = devm_kmalloc_array(&pdata->pdev->dev, XGENE_EXTD_STATS_LEN, sizeof(u64), GFP_KERNEL); if (!pdata->extd_stats) return -ENOMEM; xgene_get_extd_stats(pdata); memset(pdata->extd_stats, 0, XGENE_EXTD_STATS_LEN * sizeof(u64)); return 0; } static void xgene_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *dummy, u64 *data) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct rtnl_link_stats64 stats; int i; dev_get_stats(ndev, &stats); for (i = 0; i < XGENE_STATS_LEN; i++) data[i] = *(u64 *)((char *)&stats + gstrings_stats[i].offset); xgene_get_extd_stats(pdata); for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) data[i + XGENE_STATS_LEN] = pdata->extd_stats[i]; } static void xgene_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pp) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); pp->autoneg = pdata->pause_autoneg; pp->tx_pause = pdata->tx_pause; pp->rx_pause = pdata->rx_pause; } static int xgene_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pp) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; if (phy_interface_mode_is_rgmii(pdata->phy_mode) || pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { if (!phydev) return -EINVAL; if (!phy_validate_pause(phydev, pp)) return -EINVAL; pdata->pause_autoneg = pp->autoneg; pdata->tx_pause = pp->tx_pause; pdata->rx_pause = pp->rx_pause; phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause); if (!pp->autoneg) { pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause); pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause); } } else { if (pp->autoneg) return -EINVAL; pdata->tx_pause = pp->tx_pause; pdata->rx_pause = pp->rx_pause; pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause); pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause); } return 0; } static const struct ethtool_ops xgene_ethtool_ops = { .get_drvinfo = xgene_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = xgene_get_strings, .get_sset_count = xgene_get_sset_count, .get_ethtool_stats = xgene_get_ethtool_stats, .get_link_ksettings = xgene_get_link_ksettings, .set_link_ksettings = xgene_set_link_ksettings, .get_pauseparam = xgene_get_pauseparam, .set_pauseparam = xgene_set_pauseparam }; void xgene_enet_set_ethtool_ops(struct net_device *ndev) { ndev->ethtool_ops = &xgene_ethtool_ops; }
linux-master
drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
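/*
 * Illustrative sketch (host-side C, not part of the driver sources above):
 * xgene_get_extd_stats() accumulates hardware counters of differing widths by
 * masking each raw register read with GENMASK(width - 1, 0) before adding it
 * to a 64-bit software total. The small program below shows the same
 * mask-and-accumulate idea; the 24-bit width and the sample values are
 * invented for the demo and do not correspond to real X-Gene counters.
 */
#include <stdint.h>
#include <stdio.h>

/* host-side stand-in for the kernel's GENMASK() on a 32-bit register */
#define GENMASK32(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))

int main(void)
{
	/* pretend these are successive reads of a 24-bit hardware counter */
	uint32_t raw_reads[] = { 0x00fffffe, 0x12000003, 0x00000010 };
	uint32_t mask = GENMASK32(23, 0);	/* assumed counter width: 24 bits */
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < sizeof(raw_reads) / sizeof(raw_reads[0]); i++)
		total += raw_reads[i] & mask;	/* drop bits above the counter width */

	printf("accumulated = 0x%llx\n", (unsigned long long)total);
	return 0;
}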
// SPDX-License-Identifier: GPL-2.0-or-later /* Applied Micro X-Gene SoC Ethernet Driver * * Copyright (c) 2014, Applied Micro Circuits Corporation * Authors: Iyappan Subramanian <[email protected]> * Keyur Chudgar <[email protected]> */ #include "xgene_enet_main.h" #include "xgene_enet_hw.h" #include "xgene_enet_sgmac.h" #include "xgene_enet_xgmac.h" static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val) { iowrite32(val, p->eth_csr_addr + offset); } static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset, u32 val) { iowrite32(val, p->base_addr + offset); } static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p, u32 offset, u32 val) { iowrite32(val, p->eth_ring_if_addr + offset); } static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p, u32 offset, u32 val) { iowrite32(val, p->eth_diag_csr_addr + offset); } static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata, u32 offset, u32 val) { void __iomem *addr = pdata->mcx_mac_csr_addr + offset; iowrite32(val, addr); } static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset) { return ioread32(p->eth_csr_addr + offset); } static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset) { return ioread32(p->eth_diag_csr_addr + offset); } static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset) { return ioread32(p->mcx_mac_csr_addr + offset); } static int xgene_enet_ecc_init(struct xgene_enet_pdata *p) { struct net_device *ndev = p->ndev; u32 data, shutdown; int i = 0; shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR); data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); if (!shutdown && data == ~0U) { netdev_dbg(ndev, "+ ecc_init done, skipping\n"); return 0; } xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); do { usleep_range(100, 110); data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); if (data == ~0U) return 0; } while (++i < 10); netdev_err(ndev, "Failed to release memory from shutdown\n"); return -ENODEV; } static void xgene_sgmac_get_drop_cnt(struct xgene_enet_pdata *pdata, u32 *rx, u32 *tx) { u32 addr, count; addr = (pdata->enet_id != XGENE_ENET1) ? XG_MCX_ICM_ECM_DROP_COUNT_REG0_ADDR : ICM_ECM_DROP_COUNT_REG0_ADDR + pdata->port_id * OFFSET_4; count = xgene_enet_rd_mcx_csr(pdata, addr); *rx = ICM_DROP_COUNT(count); *tx = ECM_DROP_COUNT(count); /* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */ addr = (pdata->enet_id != XGENE_ENET1) ? XG_MCX_ECM_CONFIG0_REG_0_ADDR : ECM_CONFIG0_REG_0_ADDR + pdata->port_id * OFFSET_4; xgene_enet_rd_mcx_csr(pdata, addr); } static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p) { u32 val; val = (p->enet_id == XGENE_ENET1) ? 
0xffffffff : 0; xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val); xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val); } static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id, u32 reg, u16 data) { u32 addr, wr_data, done; int i; addr = PHY_ADDR(phy_id) | REG_ADDR(reg); xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr); wr_data = PHY_CONTROL(data); xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data); for (i = 0; i < 10; i++) { done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR); if (!(done & BUSY_MASK)) return; usleep_range(10, 20); } netdev_err(p->ndev, "MII_MGMT write failed\n"); } static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg) { u32 addr, data, done; int i; addr = PHY_ADDR(phy_id) | REG_ADDR(reg); xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr); xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); for (i = 0; i < 10; i++) { done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR); if (!(done & BUSY_MASK)) { data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR); xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0); return data; } usleep_range(10, 20); } netdev_err(p->ndev, "MII_MGMT read failed\n"); return 0; } static void xgene_sgmac_reset(struct xgene_enet_pdata *p) { xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1); xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0); } static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p) { const u8 *dev_addr = p->ndev->dev_addr; u32 addr0, addr1; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0); addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR); addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16); xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1); } static u32 xgene_enet_link_status(struct xgene_enet_pdata *p) { u32 data; data = xgene_mii_phy_read(p, INT_PHY_ADDR, SGMII_BASE_PAGE_ABILITY_ADDR >> 2); if (LINK_SPEED(data) == PHY_SPEED_1000) p->phy_speed = SPEED_1000; else if (LINK_SPEED(data) == PHY_SPEED_100) p->phy_speed = SPEED_100; else p->phy_speed = SPEED_10; return data & LINK_UP; } static void xgene_sgmii_configure(struct xgene_enet_pdata *p) { xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0x8000); xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000); xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0); } static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p) { xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0x8000); xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0); } static void xgene_sgmii_reset(struct xgene_enet_pdata *p) { u32 value; if (p->phy_speed == SPEED_UNKNOWN) return; value = xgene_mii_phy_read(p, INT_PHY_ADDR, SGMII_BASE_PAGE_ABILITY_ADDR >> 2); if (!(value & LINK_UP)) xgene_sgmii_tbi_control_reset(p); } static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p) { u32 icm0_addr, icm2_addr, debug_addr; u32 icm0, icm2, intf_ctl; u32 mc2, value; xgene_sgmii_reset(p); if (p->enet_id == XGENE_ENET1) { icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8; icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4; debug_addr = DEBUG_REG_ADDR; } else { icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR; icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR; debug_addr = XG_DEBUG_REG_ADDR; } icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr); icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr); mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR); intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR); switch 
(p->phy_speed) { case SPEED_10: ENET_INTERFACE_MODE2_SET(&mc2, 1); intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE); CFG_MACMODE_SET(&icm0, 0); CFG_WAITASYNCRD_SET(&icm2, 500); break; case SPEED_100: ENET_INTERFACE_MODE2_SET(&mc2, 1); intf_ctl &= ~ENET_GHD_MODE; intf_ctl |= ENET_LHD_MODE; CFG_MACMODE_SET(&icm0, 1); CFG_WAITASYNCRD_SET(&icm2, 80); break; default: ENET_INTERFACE_MODE2_SET(&mc2, 2); intf_ctl &= ~ENET_LHD_MODE; intf_ctl |= ENET_GHD_MODE; CFG_MACMODE_SET(&icm0, 2); CFG_WAITASYNCRD_SET(&icm2, 16); value = xgene_enet_rd_csr(p, debug_addr); value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; xgene_enet_wr_csr(p, debug_addr, value); break; } mc2 |= FULL_DUPLEX2 | PAD_CRC; xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2); xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl); xgene_enet_wr_mcx_csr(p, icm0_addr, icm0); xgene_enet_wr_mcx_csr(p, icm2_addr, icm2); } static void xgene_sgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size) { xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size); } static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p) { u32 data, loop = 10; xgene_sgmii_configure(p); while (loop--) { data = xgene_mii_phy_read(p, INT_PHY_ADDR, SGMII_STATUS_ADDR >> 2); if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS)) break; usleep_range(1000, 2000); } if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS)) netdev_err(p->ndev, "Auto-negotiation failed\n"); } static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set) { u32 data; data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR); if (set) data |= bits; else data &= ~bits; xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data); } static void xgene_sgmac_flowctl_tx(struct xgene_enet_pdata *p, bool enable) { xgene_sgmac_rxtx(p, TX_FLOW_EN, enable); p->mac_ops->enable_tx_pause(p, enable); } static void xgene_sgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable) { xgene_sgmac_rxtx(pdata, RX_FLOW_EN, enable); } static void xgene_sgmac_init(struct xgene_enet_pdata *p) { u32 pause_thres_reg, pause_off_thres_reg; u32 enet_spare_cfg_reg, rsif_config_reg; u32 cfg_bypass_reg, rx_dv_gate_reg; u32 data, data1, data2, offset; u32 multi_dpf_reg; if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver)) xgene_sgmac_reset(p); xgene_sgmii_enable_autoneg(p); xgene_sgmac_set_speed(p); xgene_sgmac_set_mac_addr(p); if (p->enet_id == XGENE_ENET1) { enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR; rsif_config_reg = RSIF_CONFIG_REG_ADDR; cfg_bypass_reg = CFG_BYPASS_ADDR; offset = p->port_id * OFFSET_4; rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset; } else { enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR; rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR; cfg_bypass_reg = XG_CFG_BYPASS_ADDR; rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR; } data = xgene_enet_rd_csr(p, enet_spare_cfg_reg); data |= MPA_IDLE_WITH_QMI_EMPTY; xgene_enet_wr_csr(p, enet_spare_cfg_reg, data); /* Adjust MDC clock frequency */ data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR); MGMT_CLOCK_SEL_SET(&data, 7); xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data); /* Enable drop if bufpool not available */ data = xgene_enet_rd_csr(p, rsif_config_reg); data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; xgene_enet_wr_csr(p, rsif_config_reg, data); /* Configure HW pause frame generation */ multi_dpf_reg = (p->enet_id == XGENE_ENET1) ? 
CSR_MULTI_DPF0_ADDR : XG_MCX_MULTI_DPF0_ADDR; data = xgene_enet_rd_mcx_csr(p, multi_dpf_reg); data = (DEF_QUANTA << 16) | (data & 0xffff); xgene_enet_wr_mcx_csr(p, multi_dpf_reg, data); if (p->enet_id != XGENE_ENET1) { data = xgene_enet_rd_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR); data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF); xgene_enet_wr_mcx_csr(p, XG_MCX_MULTI_DPF1_ADDR, data); } pause_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_THRESH : XG_RXBUF_PAUSE_THRESH; pause_off_thres_reg = (p->enet_id == XGENE_ENET1) ? RXBUF_PAUSE_OFF_THRESH : 0; if (p->enet_id == XGENE_ENET1) { data1 = xgene_enet_rd_csr(p, pause_thres_reg); data2 = xgene_enet_rd_csr(p, pause_off_thres_reg); if (!(p->port_id % 2)) { data1 = (data1 & 0xffff0000) | DEF_PAUSE_THRES; data2 = (data2 & 0xffff0000) | DEF_PAUSE_OFF_THRES; } else { data1 = (data1 & 0xffff) | (DEF_PAUSE_THRES << 16); data2 = (data2 & 0xffff) | (DEF_PAUSE_OFF_THRES << 16); } xgene_enet_wr_csr(p, pause_thres_reg, data1); xgene_enet_wr_csr(p, pause_off_thres_reg, data2); } else { data = (DEF_PAUSE_OFF_THRES << 16) | DEF_PAUSE_THRES; xgene_enet_wr_csr(p, pause_thres_reg, data); } xgene_sgmac_flowctl_tx(p, p->tx_pause); xgene_sgmac_flowctl_rx(p, p->rx_pause); /* Bypass traffic gating */ xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84); xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX); xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0); } static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p) { xgene_sgmac_rxtx(p, RX_EN, true); } static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p) { xgene_sgmac_rxtx(p, TX_EN, true); } static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p) { xgene_sgmac_rxtx(p, RX_EN, false); } static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p) { xgene_sgmac_rxtx(p, TX_EN, false); } static int xgene_enet_reset(struct xgene_enet_pdata *p) { struct device *dev = &p->pdev->dev; if (!xgene_ring_mgr_init(p)) return -ENODEV; if (p->mdio_driver && p->enet_id == XGENE_ENET2) { xgene_enet_config_ring_if_assoc(p); return 0; } if (p->enet_id == XGENE_ENET2) xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN); if (dev->of_node) { if (!IS_ERR(p->clk)) { clk_prepare_enable(p->clk); udelay(5); clk_disable_unprepare(p->clk); udelay(5); clk_prepare_enable(p->clk); udelay(5); } } else { #ifdef CONFIG_ACPI acpi_status status; status = acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), "_RST", NULL, NULL); if (ACPI_FAILURE(status)) { acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), "_INI", NULL, NULL); } #endif } if (!p->port_id) { xgene_enet_ecc_init(p); xgene_enet_config_ring_if_assoc(p); } return 0; } static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, u32 dst_ring_num, u16 bufpool_id, u16 nxtbufpool_id) { u32 cle_bypass_reg0, cle_bypass_reg1; u32 offset = p->port_id * MAC_OFFSET; u32 data, fpsel, nxtfpsel; if (p->enet_id == XGENE_ENET1) { cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR; cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR; } else { cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR; cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR; } data = CFG_CLE_BYPASS_EN0; xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data); fpsel = xgene_enet_get_fpsel(bufpool_id); nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id); data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel) | CFG_CLE_NXTFPSEL0(nxtfpsel); xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data); } static void xgene_enet_clear(struct xgene_enet_pdata *pdata, struct xgene_enet_desc_ring *ring) { u32 addr, data; if (xgene_enet_is_bufpool(ring->id)) { addr = 
ENET_CFGSSQMIFPRESET_ADDR; data = BIT(xgene_enet_get_fpsel(ring->id)); } else { addr = ENET_CFGSSQMIWQRESET_ADDR; data = BIT(xgene_enet_ring_bufnum(ring->id)); } xgene_enet_wr_ring_if(pdata, addr, data); } static void xgene_enet_shutdown(struct xgene_enet_pdata *p) { struct device *dev = &p->pdev->dev; if (dev->of_node) { if (!IS_ERR(p->clk)) clk_disable_unprepare(p->clk); } } static void xgene_enet_link_state(struct work_struct *work) { struct xgene_enet_pdata *p = container_of(to_delayed_work(work), struct xgene_enet_pdata, link_work); struct net_device *ndev = p->ndev; u32 link, poll_interval; link = xgene_enet_link_status(p); if (link) { if (!netif_carrier_ok(ndev)) { netif_carrier_on(ndev); xgene_sgmac_set_speed(p); xgene_sgmac_rx_enable(p); xgene_sgmac_tx_enable(p); netdev_info(ndev, "Link is Up - %dMbps\n", p->phy_speed); } poll_interval = PHY_POLL_LINK_ON; } else { if (netif_carrier_ok(ndev)) { xgene_sgmac_rx_disable(p); xgene_sgmac_tx_disable(p); netif_carrier_off(ndev); netdev_info(ndev, "Link is Down\n"); } poll_interval = PHY_POLL_LINK_OFF; } schedule_delayed_work(&p->link_work, poll_interval); } static void xgene_sgmac_enable_tx_pause(struct xgene_enet_pdata *p, bool enable) { u32 data, ecm_cfg_addr; if (p->enet_id == XGENE_ENET1) { ecm_cfg_addr = (!(p->port_id % 2)) ? CSR_ECM_CFG_0_ADDR : CSR_ECM_CFG_1_ADDR; } else { ecm_cfg_addr = XG_MCX_ECM_CFG_0_ADDR; } data = xgene_enet_rd_mcx_csr(p, ecm_cfg_addr); if (enable) data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN; else data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN); xgene_enet_wr_mcx_csr(p, ecm_cfg_addr, data); } const struct xgene_mac_ops xgene_sgmac_ops = { .init = xgene_sgmac_init, .reset = xgene_sgmac_reset, .rx_enable = xgene_sgmac_rx_enable, .tx_enable = xgene_sgmac_tx_enable, .rx_disable = xgene_sgmac_rx_disable, .tx_disable = xgene_sgmac_tx_disable, .get_drop_cnt = xgene_sgmac_get_drop_cnt, .set_speed = xgene_sgmac_set_speed, .set_mac_addr = xgene_sgmac_set_mac_addr, .set_framesize = xgene_sgmac_set_frame_size, .link_state = xgene_enet_link_state, .enable_tx_pause = xgene_sgmac_enable_tx_pause, .flowctl_tx = xgene_sgmac_flowctl_tx, .flowctl_rx = xgene_sgmac_flowctl_rx }; const struct xgene_port_ops xgene_sgport_ops = { .reset = xgene_enet_reset, .clear = xgene_enet_clear, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_enet_shutdown };
linux-master
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Applied Micro X-Gene SoC Ethernet v2 Driver * * Copyright (c) 2017, Applied Micro Circuits Corporation * Author(s): Iyappan Subramanian <[email protected]> * Keyur Chudgar <[email protected]> */ #include "main.h" void xge_mac_reset(struct xge_pdata *pdata) { xge_wr_csr(pdata, MAC_CONFIG_1, SOFT_RESET); xge_wr_csr(pdata, MAC_CONFIG_1, 0); } void xge_mac_set_speed(struct xge_pdata *pdata) { u32 icm0, icm2, ecm0, mc2; u32 intf_ctrl, rgmii; icm0 = xge_rd_csr(pdata, ICM_CONFIG0_REG_0); icm2 = xge_rd_csr(pdata, ICM_CONFIG2_REG_0); ecm0 = xge_rd_csr(pdata, ECM_CONFIG0_REG_0); rgmii = xge_rd_csr(pdata, RGMII_REG_0); mc2 = xge_rd_csr(pdata, MAC_CONFIG_2); intf_ctrl = xge_rd_csr(pdata, INTERFACE_CONTROL); icm2 |= CFG_WAITASYNCRD_EN; switch (pdata->phy_speed) { case SPEED_10: SET_REG_BITS(&mc2, INTF_MODE, 1); SET_REG_BITS(&intf_ctrl, HD_MODE, 0); SET_REG_BITS(&icm0, CFG_MACMODE, 0); SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 500); SET_REG_BIT(&rgmii, CFG_SPEED_125, 0); break; case SPEED_100: SET_REG_BITS(&mc2, INTF_MODE, 1); SET_REG_BITS(&intf_ctrl, HD_MODE, 1); SET_REG_BITS(&icm0, CFG_MACMODE, 1); SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 80); SET_REG_BIT(&rgmii, CFG_SPEED_125, 0); break; default: SET_REG_BITS(&mc2, INTF_MODE, 2); SET_REG_BITS(&intf_ctrl, HD_MODE, 2); SET_REG_BITS(&icm0, CFG_MACMODE, 2); SET_REG_BITS(&icm2, CFG_WAITASYNCRD, 16); SET_REG_BIT(&rgmii, CFG_SPEED_125, 1); break; } mc2 |= FULL_DUPLEX | CRC_EN | PAD_CRC; SET_REG_BITS(&ecm0, CFG_WFIFOFULLTHR, 0x32); xge_wr_csr(pdata, MAC_CONFIG_2, mc2); xge_wr_csr(pdata, INTERFACE_CONTROL, intf_ctrl); xge_wr_csr(pdata, RGMII_REG_0, rgmii); xge_wr_csr(pdata, ICM_CONFIG0_REG_0, icm0); xge_wr_csr(pdata, ICM_CONFIG2_REG_0, icm2); xge_wr_csr(pdata, ECM_CONFIG0_REG_0, ecm0); } void xge_mac_set_station_addr(struct xge_pdata *pdata) { const u8 *dev_addr = pdata->ndev->dev_addr; u32 addr0, addr1; addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | (dev_addr[1] << 8) | dev_addr[0]; addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); xge_wr_csr(pdata, STATION_ADDR0, addr0); xge_wr_csr(pdata, STATION_ADDR1, addr1); } void xge_mac_init(struct xge_pdata *pdata) { xge_mac_reset(pdata); xge_mac_set_speed(pdata); xge_mac_set_station_addr(pdata); } void xge_mac_enable(struct xge_pdata *pdata) { u32 data; data = xge_rd_csr(pdata, MAC_CONFIG_1); data |= TX_EN | RX_EN; xge_wr_csr(pdata, MAC_CONFIG_1, data); data = xge_rd_csr(pdata, MAC_CONFIG_1); } void xge_mac_disable(struct xge_pdata *pdata) { u32 data; data = xge_rd_csr(pdata, MAC_CONFIG_1); data &= ~(TX_EN | RX_EN); xge_wr_csr(pdata, MAC_CONFIG_1, data); }
linux-master
drivers/net/ethernet/apm/xgene-v2/mac.c
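/*
 * Illustrative sketch (host-side C, not driver code): xge_mac_set_station_addr()
 * above splits the six-byte MAC address across two 32-bit CSRs, bytes 0-3
 * packed low-byte-first into STATION_ADDR0 and bytes 4-5 into the top half of
 * STATION_ADDR1. The round trip below makes that byte layout explicit; the
 * sample address and local variable names are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t addr0, addr1;
	uint8_t out[6];

	/* pack: same layout the driver writes to STATION_ADDR0/STATION_ADDR1 */
	addr0 = ((uint32_t)mac[3] << 24) | ((uint32_t)mac[2] << 16) |
		((uint32_t)mac[1] << 8) | mac[0];
	addr1 = ((uint32_t)mac[5] << 24) | ((uint32_t)mac[4] << 16);

	/* unpack again to confirm the round trip */
	out[0] = addr0 & 0xff;
	out[1] = (addr0 >> 8) & 0xff;
	out[2] = (addr0 >> 16) & 0xff;
	out[3] = (addr0 >> 24) & 0xff;
	out[4] = (addr1 >> 16) & 0xff;
	out[5] = (addr1 >> 24) & 0xff;

	printf("addr0=0x%08x addr1=0x%08x mac=%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr0, addr1, out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}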
// SPDX-License-Identifier: GPL-2.0-or-later /* * Applied Micro X-Gene SoC Ethernet v2 Driver * * Copyright (c) 2017, Applied Micro Circuits Corporation * Author(s): Iyappan Subramanian <[email protected]> * Keyur Chudgar <[email protected]> */ #include "main.h" static int xge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) { struct xge_pdata *pdata = bus->priv; u32 done, val = 0; u8 wait = 10; SET_REG_BITS(&val, PHY_ADDR, phy_id); SET_REG_BITS(&val, REG_ADDR, reg); xge_wr_csr(pdata, MII_MGMT_ADDRESS, val); xge_wr_csr(pdata, MII_MGMT_CONTROL, data); do { usleep_range(5, 10); done = xge_rd_csr(pdata, MII_MGMT_INDICATORS); } while ((done & MII_MGMT_BUSY) && wait--); if (done & MII_MGMT_BUSY) { dev_err(&bus->dev, "MII_MGMT write failed\n"); return -ETIMEDOUT; } return 0; } static int xge_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct xge_pdata *pdata = bus->priv; u32 data, done, val = 0; u8 wait = 10; SET_REG_BITS(&val, PHY_ADDR, phy_id); SET_REG_BITS(&val, REG_ADDR, reg); xge_wr_csr(pdata, MII_MGMT_ADDRESS, val); xge_wr_csr(pdata, MII_MGMT_COMMAND, MII_READ_CYCLE); do { usleep_range(5, 10); done = xge_rd_csr(pdata, MII_MGMT_INDICATORS); } while ((done & MII_MGMT_BUSY) && wait--); if (done & MII_MGMT_BUSY) { dev_err(&bus->dev, "MII_MGMT read failed\n"); return -ETIMEDOUT; } data = xge_rd_csr(pdata, MII_MGMT_STATUS); xge_wr_csr(pdata, MII_MGMT_COMMAND, 0); return data; } static void xge_adjust_link(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; if (phydev->link) { if (pdata->phy_speed != phydev->speed) { pdata->phy_speed = phydev->speed; xge_mac_set_speed(pdata); xge_mac_enable(pdata); phy_print_status(phydev); } } else { if (pdata->phy_speed != SPEED_UNKNOWN) { pdata->phy_speed = SPEED_UNKNOWN; xge_mac_disable(pdata); phy_print_status(phydev); } } } void xge_mdio_remove(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct mii_bus *mdio_bus = pdata->mdio_bus; if (ndev->phydev) phy_disconnect(ndev->phydev); if (mdio_bus->state == MDIOBUS_REGISTERED) mdiobus_unregister(mdio_bus); mdiobus_free(mdio_bus); } int xge_mdio_config(struct net_device *ndev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct mii_bus *mdio_bus; struct phy_device *phydev; int ret; mdio_bus = mdiobus_alloc(); if (!mdio_bus) return -ENOMEM; mdio_bus->name = "APM X-Gene Ethernet (v2) MDIO Bus"; mdio_bus->read = xge_mdio_read; mdio_bus->write = xge_mdio_write; mdio_bus->priv = pdata; mdio_bus->parent = dev; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev)); pdata->mdio_bus = mdio_bus; mdio_bus->phy_mask = 0x1; ret = mdiobus_register(mdio_bus); if (ret) goto err; phydev = phy_find_first(mdio_bus); if (!phydev) { dev_err(dev, "no PHY found\n"); ret = -ENODEV; goto err; } phydev = phy_connect(ndev, phydev_name(phydev), &xge_adjust_link, pdata->resources.phy_mode); if (IS_ERR(phydev)) { netdev_err(ndev, "Could not attach to PHY\n"); ret = PTR_ERR(phydev); goto err; } linkmode_set_bit_array(phy_10_100_features_array, ARRAY_SIZE(phy_10_100_features_array), mask); linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask); linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask); linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask); linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask); linkmode_andnot(phydev->supported, phydev->supported, mask); 
	linkmode_copy(phydev->advertising, phydev->supported);

	pdata->phy_speed = SPEED_UNKNOWN;

	return 0;
err:
	xge_mdio_remove(ndev);

	return ret;
}
linux-master
drivers/net/ethernet/apm/xgene-v2/mdio.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Applied Micro X-Gene SoC Ethernet v2 Driver * * Copyright (c) 2017, Applied Micro Circuits Corporation * Author(s): Iyappan Subramanian <[email protected]> * Keyur Chudgar <[email protected]> */ #include "main.h" #define XGE_STAT(m) { #m, offsetof(struct xge_pdata, stats.m) } #define XGE_EXTD_STAT(m, n) \ { \ #m, \ n, \ 0 \ } static const struct xge_gstrings_stats gstrings_stats[] = { XGE_STAT(rx_packets), XGE_STAT(tx_packets), XGE_STAT(rx_bytes), XGE_STAT(tx_bytes), XGE_STAT(rx_errors) }; static struct xge_gstrings_extd_stats gstrings_extd_stats[] = { XGE_EXTD_STAT(tx_rx_64b_frame_cntr, TR64), XGE_EXTD_STAT(tx_rx_127b_frame_cntr, TR127), XGE_EXTD_STAT(tx_rx_255b_frame_cntr, TR255), XGE_EXTD_STAT(tx_rx_511b_frame_cntr, TR511), XGE_EXTD_STAT(tx_rx_1023b_frame_cntr, TR1K), XGE_EXTD_STAT(tx_rx_1518b_frame_cntr, TRMAX), XGE_EXTD_STAT(tx_rx_1522b_frame_cntr, TRMGV), XGE_EXTD_STAT(rx_fcs_error_cntr, RFCS), XGE_EXTD_STAT(rx_multicast_pkt_cntr, RMCA), XGE_EXTD_STAT(rx_broadcast_pkt_cntr, RBCA), XGE_EXTD_STAT(rx_ctrl_frame_pkt_cntr, RXCF), XGE_EXTD_STAT(rx_pause_frame_pkt_cntr, RXPF), XGE_EXTD_STAT(rx_unk_opcode_cntr, RXUO), XGE_EXTD_STAT(rx_align_err_cntr, RALN), XGE_EXTD_STAT(rx_frame_len_err_cntr, RFLR), XGE_EXTD_STAT(rx_code_err_cntr, RCDE), XGE_EXTD_STAT(rx_carrier_sense_err_cntr, RCSE), XGE_EXTD_STAT(rx_undersize_pkt_cntr, RUND), XGE_EXTD_STAT(rx_oversize_pkt_cntr, ROVR), XGE_EXTD_STAT(rx_fragments_cntr, RFRG), XGE_EXTD_STAT(rx_jabber_cntr, RJBR), XGE_EXTD_STAT(rx_dropped_pkt_cntr, RDRP), XGE_EXTD_STAT(tx_multicast_pkt_cntr, TMCA), XGE_EXTD_STAT(tx_broadcast_pkt_cntr, TBCA), XGE_EXTD_STAT(tx_pause_ctrl_frame_cntr, TXPF), XGE_EXTD_STAT(tx_defer_pkt_cntr, TDFR), XGE_EXTD_STAT(tx_excv_defer_pkt_cntr, TEDF), XGE_EXTD_STAT(tx_single_col_pkt_cntr, TSCL), XGE_EXTD_STAT(tx_multi_col_pkt_cntr, TMCL), XGE_EXTD_STAT(tx_late_col_pkt_cntr, TLCL), XGE_EXTD_STAT(tx_excv_col_pkt_cntr, TXCL), XGE_EXTD_STAT(tx_total_col_cntr, TNCL), XGE_EXTD_STAT(tx_pause_frames_hnrd_cntr, TPFH), XGE_EXTD_STAT(tx_drop_frame_cntr, TDRP), XGE_EXTD_STAT(tx_jabber_frame_cntr, TJBR), XGE_EXTD_STAT(tx_fcs_error_cntr, TFCS), XGE_EXTD_STAT(tx_ctrl_frame_cntr, TXCF), XGE_EXTD_STAT(tx_oversize_frame_cntr, TOVR), XGE_EXTD_STAT(tx_undersize_frame_cntr, TUND), XGE_EXTD_STAT(tx_fragments_cntr, TFRG) }; #define XGE_STATS_LEN ARRAY_SIZE(gstrings_stats) #define XGE_EXTD_STATS_LEN ARRAY_SIZE(gstrings_extd_stats) static void xge_mac_get_extd_stats(struct xge_pdata *pdata) { u32 data; int i; for (i = 0; i < XGE_EXTD_STATS_LEN; i++) { data = xge_rd_csr(pdata, gstrings_extd_stats[i].addr); gstrings_extd_stats[i].value += data; } } static void xge_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct xge_pdata *pdata = netdev_priv(ndev); struct platform_device *pdev = pdata->pdev; strcpy(info->driver, "xgene-enet-v2"); sprintf(info->bus_info, "%s", pdev->name); } static void xge_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { u8 *p = data; int i; if (stringset != ETH_SS_STATS) return; for (i = 0; i < XGE_STATS_LEN; i++) { memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < XGE_EXTD_STATS_LEN; i++) { memcpy(p, gstrings_extd_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } } static int xge_get_sset_count(struct net_device *ndev, int sset) { if (sset != ETH_SS_STATS) return -EINVAL; return XGE_STATS_LEN + XGE_EXTD_STATS_LEN; } static void xge_get_ethtool_stats(struct net_device *ndev, struct ethtool_stats *dummy, u64 
				  *data)
{
	void *pdata = netdev_priv(ndev);
	int i;

	for (i = 0; i < XGE_STATS_LEN; i++)
		*data++ = *(u64 *)(pdata + gstrings_stats[i].offset);

	xge_mac_get_extd_stats(pdata);

	for (i = 0; i < XGE_EXTD_STATS_LEN; i++)
		*data++ = gstrings_extd_stats[i].value;
}

static int xge_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(phydev, cmd);

	return 0;
}

static int xge_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(phydev, cmd);
}

static const struct ethtool_ops xge_ethtool_ops = {
	.get_drvinfo = xge_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = xge_get_strings,
	.get_sset_count = xge_get_sset_count,
	.get_ethtool_stats = xge_get_ethtool_stats,
	.get_link_ksettings = xge_get_link_ksettings,
	.set_link_ksettings = xge_set_link_ksettings,
};

void xge_set_ethtool_ops(struct net_device *ndev)
{
	ndev->ethtool_ops = &xge_ethtool_ops;
}
linux-master
drivers/net/ethernet/apm/xgene-v2/ethtool.c
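/*
 * Illustrative sketch (user-space C, not part of the driver): the ethtool_ops
 * callbacks registered above (xge_get_drvinfo and friends) are reached from
 * user space through the SIOCETHTOOL ioctl, which is what the ethtool utility
 * uses under the hood. The minimal program below queries driver name and bus
 * info for an interface named on the command line; error handling beyond the
 * basics is omitted.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&drvinfo;	/* ethtool commands ride in ifr_data */

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}

	printf("driver: %s\nbus:    %s\n", drvinfo.driver, drvinfo.bus_info);
	close(fd);
	return 0;
}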
// SPDX-License-Identifier: GPL-2.0-or-later /* * Applied Micro X-Gene SoC Ethernet v2 Driver * * Copyright (c) 2017, Applied Micro Circuits Corporation * Author(s): Iyappan Subramanian <[email protected]> * Keyur Chudgar <[email protected]> */ #include "main.h" static const struct acpi_device_id xge_acpi_match[]; static int xge_get_resources(struct xge_pdata *pdata) { struct platform_device *pdev; struct net_device *ndev; int phy_mode, ret = 0; struct resource *res; struct device *dev; pdev = pdata->pdev; dev = &pdev->dev; ndev = pdata->ndev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "Resource enet_csr not defined\n"); return -ENODEV; } pdata->resources.base_addr = devm_ioremap(dev, res->start, resource_size(res)); if (!pdata->resources.base_addr) { dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); return -ENOMEM; } if (device_get_ethdev_address(dev, ndev)) eth_hw_addr_random(ndev); memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); phy_mode = device_get_phy_mode(dev); if (phy_mode < 0) { dev_err(dev, "Unable to get phy-connection-type\n"); return phy_mode; } pdata->resources.phy_mode = phy_mode; if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) { dev_err(dev, "Incorrect phy-connection-type specified\n"); return -ENODEV; } ret = platform_get_irq(pdev, 0); if (ret < 0) return ret; pdata->resources.irq = ret; return 0; } static int xge_refill_buffers(struct net_device *ndev, u32 nbuf) { struct xge_pdata *pdata = netdev_priv(ndev); struct xge_desc_ring *ring = pdata->rx_ring; const u8 slots = XGENE_ENET_NUM_DESC - 1; struct device *dev = &pdata->pdev->dev; struct xge_raw_desc *raw_desc; u64 addr_lo, addr_hi; u8 tail = ring->tail; struct sk_buff *skb; dma_addr_t dma_addr; u16 len; int i; for (i = 0; i < nbuf; i++) { raw_desc = &ring->raw_desc[tail]; len = XGENE_ENET_STD_MTU; skb = netdev_alloc_skb(ndev, len); if (unlikely(!skb)) return -ENOMEM; dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dma_addr)) { netdev_err(ndev, "DMA mapping error\n"); dev_kfree_skb_any(skb); return -EINVAL; } ring->pkt_info[tail].skb = skb; ring->pkt_info[tail].dma_addr = dma_addr; addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1)); addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1)); raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) | SET_BITS(NEXT_DESC_ADDRH, addr_hi) | SET_BITS(PKT_ADDRH, upper_32_bits(dma_addr))); dma_wmb(); raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) | SET_BITS(E, 1)); tail = (tail + 1) & slots; } ring->tail = tail; return 0; } static int xge_init_hw(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); int ret; ret = xge_port_reset(ndev); if (ret) return ret; xge_port_init(ndev); pdata->nbufs = NUM_BUFS; return 0; } static irqreturn_t xge_irq(const int irq, void *data) { struct xge_pdata *pdata = data; if (napi_schedule_prep(&pdata->napi)) { xge_intr_disable(pdata); __napi_schedule(&pdata->napi); } return IRQ_HANDLED; } static int xge_request_irq(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); int ret; snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name); ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name, pdata); if (ret) netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name); return ret; } static void xge_free_irq(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); free_irq(pdata->resources.irq, pdata); } static bool is_tx_slot_available(struct 
xge_raw_desc *raw_desc) { if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) && (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY)) return true; return false; } static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct xge_desc_ring *tx_ring; struct xge_raw_desc *raw_desc; static dma_addr_t dma_addr; u64 addr_lo, addr_hi; void *pkt_buf; u8 tail; u16 len; tx_ring = pdata->tx_ring; tail = tx_ring->tail; len = skb_headlen(skb); raw_desc = &tx_ring->raw_desc[tail]; if (!is_tx_slot_available(raw_desc)) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; } /* Packet buffers should be 64B aligned */ pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, GFP_ATOMIC); if (unlikely(!pkt_buf)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } memcpy(pkt_buf, skb->data, len); addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1)); addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1)); raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) | SET_BITS(NEXT_DESC_ADDRH, addr_hi) | SET_BITS(PKT_ADDRH, upper_32_bits(dma_addr))); tx_ring->pkt_info[tail].skb = skb; tx_ring->pkt_info[tail].dma_addr = dma_addr; tx_ring->pkt_info[tail].pkt_buf = pkt_buf; dma_wmb(); raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) | SET_BITS(PKT_SIZE, len) | SET_BITS(E, 0)); skb_tx_timestamp(skb); xge_wr_csr(pdata, DMATXCTRL, 1); tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1); return NETDEV_TX_OK; } static bool is_tx_hw_done(struct xge_raw_desc *raw_desc) { if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) && !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0))) return true; return false; } static void xge_txc_poll(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct xge_desc_ring *tx_ring; struct xge_raw_desc *raw_desc; dma_addr_t dma_addr; struct sk_buff *skb; void *pkt_buf; u32 data; u8 head; tx_ring = pdata->tx_ring; head = tx_ring->head; data = xge_rd_csr(pdata, DMATXSTATUS); if (!GET_BITS(TXPKTCOUNT, data)) return; while (1) { raw_desc = &tx_ring->raw_desc[head]; if (!is_tx_hw_done(raw_desc)) break; dma_rmb(); skb = tx_ring->pkt_info[head].skb; dma_addr = tx_ring->pkt_info[head].dma_addr; pkt_buf = tx_ring->pkt_info[head].pkt_buf; pdata->stats.tx_packets++; pdata->stats.tx_bytes += skb->len; dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr); dev_kfree_skb_any(skb); /* clear pktstart address and pktsize */ raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) | SET_BITS(PKT_SIZE, SLOT_EMPTY)); xge_wr_csr(pdata, DMATXSTATUS, 1); head = (head + 1) & (XGENE_ENET_NUM_DESC - 1); } if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); tx_ring->head = head; } static int xge_rx_poll(struct net_device *ndev, unsigned int budget) { struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct xge_desc_ring *rx_ring; struct xge_raw_desc *raw_desc; struct sk_buff *skb; dma_addr_t dma_addr; int processed = 0; u8 head, rx_error; int i, ret; u32 data; u16 len; rx_ring = pdata->rx_ring; head = rx_ring->head; data = xge_rd_csr(pdata, DMARXSTATUS); if (!GET_BITS(RXPKTCOUNT, data)) return 0; for (i = 0; i < budget; i++) { raw_desc = &rx_ring->raw_desc[head]; if (GET_BITS(E, le64_to_cpu(raw_desc->m0))) break; dma_rmb(); skb = rx_ring->pkt_info[head].skb; rx_ring->pkt_info[head].skb = NULL; dma_addr = rx_ring->pkt_info[head].dma_addr; len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)); 
dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU, DMA_FROM_DEVICE); rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2)); if (unlikely(rx_error)) { pdata->stats.rx_errors++; dev_kfree_skb_any(skb); goto out; } skb_put(skb, len); skb->protocol = eth_type_trans(skb, ndev); pdata->stats.rx_packets++; pdata->stats.rx_bytes += len; napi_gro_receive(&pdata->napi, skb); out: ret = xge_refill_buffers(ndev, 1); xge_wr_csr(pdata, DMARXSTATUS, 1); xge_wr_csr(pdata, DMARXCTRL, 1); if (ret) break; head = (head + 1) & (XGENE_ENET_NUM_DESC - 1); processed++; } rx_ring->head = head; return processed; } static void xge_delete_desc_ring(struct net_device *ndev, struct xge_desc_ring *ring) { struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; u16 size; if (!ring) return; size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; if (ring->desc_addr) dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr); kfree(ring->pkt_info); kfree(ring); } static void xge_free_buffers(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct xge_desc_ring *ring = pdata->rx_ring; struct device *dev = &pdata->pdev->dev; struct sk_buff *skb; dma_addr_t dma_addr; int i; for (i = 0; i < XGENE_ENET_NUM_DESC; i++) { skb = ring->pkt_info[i].skb; dma_addr = ring->pkt_info[i].dma_addr; if (!skb) continue; dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } } static void xge_delete_desc_rings(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); xge_txc_poll(ndev); xge_delete_desc_ring(ndev, pdata->tx_ring); xge_rx_poll(ndev, 64); xge_free_buffers(ndev); xge_delete_desc_ring(ndev, pdata->rx_ring); } static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct xge_desc_ring *ring; u16 size; ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) return NULL; ring->ndev = ndev; size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL); if (!ring->desc_addr) goto err; ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info), GFP_KERNEL); if (!ring->pkt_info) goto err; xge_setup_desc(ring); return ring; err: xge_delete_desc_ring(ndev, ring); return NULL; } static int xge_create_desc_rings(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct xge_desc_ring *ring; int ret; /* create tx ring */ ring = xge_create_desc_ring(ndev); if (!ring) goto err; pdata->tx_ring = ring; xge_update_tx_desc_addr(pdata); /* create rx ring */ ring = xge_create_desc_ring(ndev); if (!ring) goto err; pdata->rx_ring = ring; xge_update_rx_desc_addr(pdata); ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC); if (ret) goto err; return 0; err: xge_delete_desc_rings(ndev); return -ENOMEM; } static int xge_open(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); int ret; ret = xge_create_desc_rings(ndev); if (ret) return ret; napi_enable(&pdata->napi); ret = xge_request_irq(ndev); if (ret) return ret; xge_intr_enable(pdata); xge_wr_csr(pdata, DMARXCTRL, 1); phy_start(ndev->phydev); xge_mac_enable(pdata); netif_start_queue(ndev); return 0; } static int xge_close(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); netif_stop_queue(ndev); xge_mac_disable(pdata); phy_stop(ndev->phydev); xge_intr_disable(pdata); xge_free_irq(ndev); napi_disable(&pdata->napi); xge_delete_desc_rings(ndev); return 0; } static int 
xge_napi(struct napi_struct *napi, const int budget) { struct net_device *ndev = napi->dev; struct xge_pdata *pdata; int processed; pdata = netdev_priv(ndev); xge_txc_poll(ndev); processed = xge_rx_poll(ndev, budget); if (processed < budget) { napi_complete_done(napi, processed); xge_intr_enable(pdata); } return processed; } static int xge_set_mac_addr(struct net_device *ndev, void *addr) { struct xge_pdata *pdata = netdev_priv(ndev); int ret; ret = eth_mac_addr(ndev, addr); if (ret) return ret; xge_mac_set_station_addr(pdata); return 0; } static bool is_tx_pending(struct xge_raw_desc *raw_desc) { if (!GET_BITS(E, le64_to_cpu(raw_desc->m0))) return true; return false; } static void xge_free_pending_skb(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct xge_desc_ring *tx_ring; struct xge_raw_desc *raw_desc; dma_addr_t dma_addr; struct sk_buff *skb; void *pkt_buf; int i; tx_ring = pdata->tx_ring; for (i = 0; i < XGENE_ENET_NUM_DESC; i++) { raw_desc = &tx_ring->raw_desc[i]; if (!is_tx_pending(raw_desc)) continue; skb = tx_ring->pkt_info[i].skb; dma_addr = tx_ring->pkt_info[i].dma_addr; pkt_buf = tx_ring->pkt_info[i].pkt_buf; dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr); dev_kfree_skb_any(skb); } } static void xge_timeout(struct net_device *ndev, unsigned int txqueue) { struct xge_pdata *pdata = netdev_priv(ndev); rtnl_lock(); if (!netif_running(ndev)) goto out; netif_stop_queue(ndev); xge_intr_disable(pdata); napi_disable(&pdata->napi); xge_wr_csr(pdata, DMATXCTRL, 0); xge_txc_poll(ndev); xge_free_pending_skb(ndev); xge_wr_csr(pdata, DMATXSTATUS, ~0U); xge_setup_desc(pdata->tx_ring); xge_update_tx_desc_addr(pdata); xge_mac_init(pdata); napi_enable(&pdata->napi); xge_intr_enable(pdata); xge_mac_enable(pdata); netif_start_queue(ndev); out: rtnl_unlock(); } static void xge_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *storage) { struct xge_pdata *pdata = netdev_priv(ndev); struct xge_stats *stats = &pdata->stats; storage->tx_packets += stats->tx_packets; storage->tx_bytes += stats->tx_bytes; storage->rx_packets += stats->rx_packets; storage->rx_bytes += stats->rx_bytes; storage->rx_errors += stats->rx_errors; } static const struct net_device_ops xgene_ndev_ops = { .ndo_open = xge_open, .ndo_stop = xge_close, .ndo_start_xmit = xge_start_xmit, .ndo_set_mac_address = xge_set_mac_addr, .ndo_tx_timeout = xge_timeout, .ndo_get_stats64 = xge_get_stats64, }; static int xge_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct net_device *ndev; struct xge_pdata *pdata; int ret; ndev = alloc_etherdev(sizeof(*pdata)); if (!ndev) return -ENOMEM; pdata = netdev_priv(ndev); pdata->pdev = pdev; pdata->ndev = ndev; SET_NETDEV_DEV(ndev, dev); platform_set_drvdata(pdev, pdata); ndev->netdev_ops = &xgene_ndev_ops; ndev->features |= NETIF_F_GSO | NETIF_F_GRO; ret = xge_get_resources(pdata); if (ret) goto err; ndev->hw_features = ndev->features; xge_set_ethtool_ops(ndev); ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret) { netdev_err(ndev, "No usable DMA configuration\n"); goto err; } ret = xge_init_hw(ndev); if (ret) goto err; ret = xge_mdio_config(ndev); if (ret) goto err; netif_napi_add(ndev, &pdata->napi, xge_napi); ret = register_netdev(ndev); if (ret) { netdev_err(ndev, "Failed to register netdev\n"); goto err_mdio_remove; } return 0; err_mdio_remove: xge_mdio_remove(ndev); err: free_netdev(ndev); return ret; } static int xge_remove(struct platform_device *pdev) { 
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

static void xge_shutdown(struct platform_device *pdev)
{
	struct xge_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xge_remove(pdev);
}

static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
	.driver = {
		.name = "xgene-enet-v2",
		.acpi_match_table = ACPI_PTR(xge_acpi_match),
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/net/ethernet/apm/xgene-v2/main.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <[email protected]>
 *	      Keyur Chudgar <[email protected]>
 */

#include "main.h"

/* create circular linked list of descriptors */
void xge_setup_desc(struct xge_desc_ring *ring)
{
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_h, next_dma;
	u16 offset;
	int i;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		raw_desc = &ring->raw_desc[i];

		offset = (i + 1) & (XGENE_ENET_NUM_DESC - 1);
		next_dma = ring->dma_addr + (offset * XGENE_ENET_DESC_SIZE);

		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		dma_h = upper_32_bits(next_dma);
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, next_dma) |
					   SET_BITS(NEXT_DESC_ADDRH, dma_h));
	}
}

void xge_update_tx_desc_addr(struct xge_pdata *pdata)
{
	struct xge_desc_ring *ring = pdata->tx_ring;
	dma_addr_t dma_addr = ring->dma_addr;

	xge_wr_csr(pdata, DMATXDESCL, dma_addr);
	xge_wr_csr(pdata, DMATXDESCH, upper_32_bits(dma_addr));

	ring->head = 0;
	ring->tail = 0;
}

void xge_update_rx_desc_addr(struct xge_pdata *pdata)
{
	struct xge_desc_ring *ring = pdata->rx_ring;
	dma_addr_t dma_addr = ring->dma_addr;

	xge_wr_csr(pdata, DMARXDESCL, dma_addr);
	xge_wr_csr(pdata, DMARXDESCH, upper_32_bits(dma_addr));

	ring->head = 0;
	ring->tail = 0;
}

void xge_intr_enable(struct xge_pdata *pdata)
{
	u32 data;

	data = RX_PKT_RCVD | TX_PKT_SENT;
	xge_wr_csr(pdata, DMAINTRMASK, data);
}

void xge_intr_disable(struct xge_pdata *pdata)
{
	xge_wr_csr(pdata, DMAINTRMASK, 0);
}
linux-master
drivers/net/ethernet/apm/xgene-v2/ring.c
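/*
 * Illustrative sketch (host-side C, not driver code): xge_setup_desc() above
 * links descriptors into a ring by pointing each slot's next-descriptor
 * address at slot (i + 1) & (NUM_DESC - 1), which only wraps correctly
 * because the ring size is a power of two. The demo below reproduces that
 * index arithmetic; NUM_DESC, DESC_SIZE and the base address are assumptions
 * made up for the example, the real values live in the driver headers.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_DESC	256U	/* must be a power of two for the '&' wrap */
#define DESC_SIZE	32U	/* bytes per descriptor (assumed) */

int main(void)
{
	uint64_t ring_base = 0x80000000ULL;	/* pretend DMA base address */
	unsigned int i, next;

	assert((NUM_DESC & (NUM_DESC - 1)) == 0);	/* power-of-two check */

	for (i = 0; i < NUM_DESC; i++) {
		uint64_t next_dma;

		next = (i + 1) & (NUM_DESC - 1);	/* last slot wraps to 0 */
		next_dma = ring_base + (uint64_t)next * DESC_SIZE;

		if (i == NUM_DESC - 1)
			printf("slot %u -> slot %u, next_dma=0x%llx\n",
			       i, next, (unsigned long long)next_dma);
	}

	/* a mask is cheaper than '%', which is why such rings use power-of-two sizes */
	return 0;
}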
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <[email protected]>
 *	      Keyur Chudgar <[email protected]>
 */

#include "main.h"

void xge_wr_csr(struct xge_pdata *pdata, u32 offset, u32 val)
{
	void __iomem *addr = pdata->resources.base_addr + offset;

	iowrite32(val, addr);
}

u32 xge_rd_csr(struct xge_pdata *pdata, u32 offset)
{
	void __iomem *addr = pdata->resources.base_addr + offset;

	return ioread32(addr);
}

int xge_port_reset(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	u32 data, wait = 10;

	xge_wr_csr(pdata, ENET_CLKEN, 0x3);
	xge_wr_csr(pdata, ENET_SRST, 0xf);
	xge_wr_csr(pdata, ENET_SRST, 0);
	xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 1);
	xge_wr_csr(pdata, CFG_MEM_RAM_SHUTDOWN, 0);

	do {
		usleep_range(100, 110);
		data = xge_rd_csr(pdata, BLOCK_MEM_RDY);
	} while (data != MEM_RDY && wait--);

	if (data != MEM_RDY) {
		dev_err(dev, "ECC init failed: %x\n", data);
		return -ETIMEDOUT;
	}

	xge_wr_csr(pdata, ENET_SHIM, DEVM_ARAUX_COH | DEVM_AWAUX_COH);

	return 0;
}

static void xge_traffic_resume(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_wr_csr(pdata, CFG_FORCE_LINK_STATUS_EN, 1);
	xge_wr_csr(pdata, FORCE_LINK_STATUS, 1);

	xge_wr_csr(pdata, CFG_LINK_AGGR_RESUME, 1);
	xge_wr_csr(pdata, RX_DV_GATE_REG, 1);
}

void xge_port_init(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	pdata->phy_speed = SPEED_1000;
	xge_mac_init(pdata);
	xge_traffic_resume(ndev);
}
linux-master
drivers/net/ethernet/apm/xgene-v2/enet.c
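/*
 * Illustrative sketch (host-side C, not driver code): xge_port_reset() above
 * waits for BLOCK_MEM_RDY with a bounded retry loop: sleep briefly, re-read,
 * and give up with -ETIMEDOUT after a fixed number of attempts. The
 * stand-alone version below shows the same pattern; fake_read_reg() and
 * MEM_RDY_VAL are stand-ins for a real MMIO read and its expected value.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MEM_RDY_VAL	0xffffffffu	/* assumed "memory ready" pattern */

/* stand-in for an MMIO read; reports ready after a few polls */
static uint32_t fake_read_reg(void)
{
	static int calls;

	return (++calls >= 3) ? MEM_RDY_VAL : 0;
}

/* poll until the register matches 'want' or 'tries' attempts are exhausted */
static bool poll_reg(uint32_t want, unsigned int tries, unsigned int delay_us)
{
	uint32_t val;

	do {
		usleep(delay_us);
		val = fake_read_reg();
	} while (val != want && tries--);

	return val == want;
}

int main(void)
{
	if (!poll_reg(MEM_RDY_VAL, 10, 100)) {
		fprintf(stderr, "timed out waiting for MEM_RDY\n");
		return 1;
	}

	printf("block memory ready\n");
	return 0;
}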
// SPDX-License-Identifier: GPL-2.0-or-later /* * QLogic QLA3xxx NIC HBA Driver * Copyright (c) 2003-2006 QLogic Corporation */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/dmapool.h> #include <linux/mempool.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/prefetch.h> #include "qla3xxx.h" #define DRV_NAME "qla3xxx" #define DRV_STRING "QLogic ISP3XXX Network Driver" #define DRV_VERSION "v2.03.00-k5" static const char ql3xxx_driver_name[] = DRV_NAME; static const char ql3xxx_driver_version[] = DRV_VERSION; #define TIMED_OUT_MSG \ "Timed out waiting for management port to get free before issuing command\n" MODULE_AUTHOR("QLogic Corporation"); MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; static int debug = -1; /* defaults above */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static int msi; module_param(msi, int, 0); MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); static const struct pci_device_id ql3xxx_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, /* required last entry */ {0,} }; MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); /* * These are the known PHY's which are used */ enum PHY_DEVICE_TYPE { PHY_TYPE_UNKNOWN = 0, PHY_VITESSE_VSC8211, PHY_AGERE_ET1011C, MAX_PHY_DEV_TYPES }; struct PHY_DEVICE_INFO { const enum PHY_DEVICE_TYPE phyDevice; const u32 phyIdOUI; const u16 phyIdModel; const char *name; }; static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, }; /* * Caller must take hw_lock. 
*/ static int ql_sem_spinlock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; unsigned int seconds = 3; do { writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); value = readl(&port_regs->CommonRegs.semaphoreReg); if ((value & (sem_mask >> 16)) == sem_bits) return 0; mdelay(1000); } while (--seconds); return -1; } static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); readl(&port_regs->CommonRegs.semaphoreReg); } static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); value = readl(&port_regs->CommonRegs.semaphoreReg); return ((value & (sem_mask >> 16)) == sem_bits); } /* * Caller holds hw_lock. */ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) { int i = 0; do { if (ql_sem_lock(qdev, QL_DRVR_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 1)) { netdev_printk(KERN_DEBUG, qdev->ndev, "driver lock acquired\n"); return 1; } mdelay(1000); } while (++i < 10); netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); return 0; } static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; writel(((ISP_CONTROL_NP_MASK << 16) | page), &port_regs->CommonRegs.ispControlStatus); readl(&port_regs->CommonRegs.ispControlStatus); qdev->current_page = page; } static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) { u32 value; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); value = readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return value; } static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) { return readl(reg); } static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) { u32 value; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (qdev->current_page != 0) ql_set_register_page(qdev, 0); value = readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return value; } static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) { if (qdev->current_page != 0) ql_set_register_page(qdev, 0); return readl(reg); } static void ql_write_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); writel(value, reg); readl(reg); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); } static void ql_write_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { writel(value, reg); readl(reg); } static void ql_write_nvram_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { writel(value, reg); readl(reg); udelay(1); } static void ql_write_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 0) ql_set_register_page(qdev, 0); writel(value, reg); readl(reg); } /* * Caller holds hw_lock. Only called during init. */ static void ql_write_page1_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 1) ql_set_register_page(qdev, 1); writel(value, reg); readl(reg); } /* * Caller holds hw_lock. Only called during init. 
*/ static void ql_write_page2_reg(struct ql3_adapter *qdev, u32 __iomem *reg, u32 value) { if (qdev->current_page != 2) ql_set_register_page(qdev, 2); writel(value, reg); readl(reg); } static void ql_disable_interrupts(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, (ISP_IMR_ENABLE_INT << 16)); } static void ql_enable_interrupts(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, ((0xff << 16) | ISP_IMR_ENABLE_INT)); } static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, struct ql_rcv_buf_cb *lrg_buf_cb) { dma_addr_t map; int err; lrg_buf_cb->next = NULL; if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; } else { qdev->lrg_buf_free_tail->next = lrg_buf_cb; qdev->lrg_buf_free_tail = lrg_buf_cb; } if (!lrg_buf_cb->skb) { lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { qdev->lrg_buf_skb_check++; } else { /* * We save some space to copy the ethhdr from first * buffer */ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); map = dma_map_single(&qdev->pdev->dev, lrg_buf_cb->skb->data, qdev->lrg_buffer_len - QL_HEADER_SPACE, DMA_FROM_DEVICE); err = dma_mapping_error(&qdev->pdev->dev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; qdev->lrg_buf_skb_check++; return; } lrg_buf_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); } } qdev->lrg_buf_free_count++; } static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter *qdev) { struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; if (lrg_buf_cb != NULL) { qdev->lrg_buf_free_head = lrg_buf_cb->next; if (qdev->lrg_buf_free_head == NULL) qdev->lrg_buf_free_tail = NULL; qdev->lrg_buf_free_count--; } return lrg_buf_cb; } static u32 addrBits = EEPROM_NO_ADDR_BITS; static u32 dataBits = EEPROM_NO_DATA_BITS; static void fm93c56a_deselect(struct ql3_adapter *qdev); static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, unsigned short *value); /* * Caller holds hw_lock. */ static void fm93c56a_select(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); } /* * Caller holds hw_lock. 
*/ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) { int i; u32 mask; u32 dataBit; u32 previousBit; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; /* Clock in a zero, then do the start bit */ ql_write_nvram_reg(qdev, spir, (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | AUBURN_EEPROM_DO_1)); ql_write_nvram_reg(qdev, spir, (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); ql_write_nvram_reg(qdev, spir, (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); mask = 1 << (FM93C56A_CMD_BITS - 1); /* Force the previous data bit to be different */ previousBit = 0xffff; for (i = 0; i < FM93C56A_CMD_BITS; i++) { dataBit = (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; if (previousBit != dataBit) { /* If the bit changed, change the DO state to match */ ql_write_nvram_reg(qdev, spir, (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit)); previousBit = dataBit; } ql_write_nvram_reg(qdev, spir, (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_RISE)); ql_write_nvram_reg(qdev, spir, (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_FALL)); cmd = cmd << 1; } mask = 1 << (addrBits - 1); /* Force the previous data bit to be different */ previousBit = 0xffff; for (i = 0; i < addrBits; i++) { dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0; if (previousBit != dataBit) { /* * If the bit changed, then change the DO state to * match */ ql_write_nvram_reg(qdev, spir, (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit)); previousBit = dataBit; } ql_write_nvram_reg(qdev, spir, (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_RISE)); ql_write_nvram_reg(qdev, spir, (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | dataBit | AUBURN_EEPROM_CLK_FALL)); eepromAddr = eepromAddr << 1; } } /* * Caller holds hw_lock. */ static void fm93c56a_deselect(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); } /* * Caller holds hw_lock. */ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) { int i; u32 data = 0; u32 dataBit; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; /* Read the data bits */ /* The first bit is a dummy. Clock right over it. */ for (i = 0; i < dataBits; i++) { ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data | AUBURN_EEPROM_CLK_RISE); ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data | AUBURN_EEPROM_CLK_FALL); dataBit = (ql_read_common_reg(qdev, spir) & AUBURN_EEPROM_DI_1) ? 1 : 0; data = (data << 1) | dataBit; } *value = (u16)data; } /* * Caller holds hw_lock. 
*/ static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, unsigned short *value) { fm93c56a_select(qdev); fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); fm93c56a_datain(qdev, value); fm93c56a_deselect(qdev); } static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) { __le16 buf[ETH_ALEN / 2]; buf[0] = cpu_to_le16(addr[0]); buf[1] = cpu_to_le16(addr[1]); buf[2] = cpu_to_le16(addr[2]); eth_hw_addr_set(ndev, (u8 *)buf); } static int ql_get_nvram_params(struct ql3_adapter *qdev) { u16 *pEEPROMData; u16 checksum = 0; u32 index; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); pEEPROMData = (u16 *)&qdev->nvram_data; qdev->eeprom_cmd_data = 0; if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 10)) { pr_err("%s: Failed ql_sem_spinlock()\n", __func__); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return -1; } for (index = 0; index < EEPROM_SIZE; index++) { eeprom_readword(qdev, index, pEEPROMData); checksum += *pEEPROMData; pEEPROMData++; } ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); if (checksum != 0) { netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", checksum); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return -1; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return checksum; } static const u32 PHYAddr[2] = { PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS }; static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 temp; int count = 1000; while (count) { temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); if (!(temp & MAC_MII_STATUS_BSY)) return 0; udelay(10); count--; } return -1; } static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 scanControl; if (qdev->numPorts > 1) { /* Auto scan will cycle through multiple ports */ scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; } else { scanControl = MAC_MII_CONTROL_SC; } /* * Scan register 1 of PHY/PETBI, * Set up to scan both devices * The autoscan starts from the first register, completes * the last one before rolling over to the first */ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, PHYAddr[0] | MII_SCAN_REGISTER); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (scanControl) | ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); } static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) { u8 ret; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; /* See if scan mode is enabled before we turn it off */ if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { /* Scan is enabled */ ret = 1; } else { /* Scan is disabled */ ret = 0; } /* * When disabling scan mode you must first change the MII register * address */ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, PHYAddr[0] | MII_SCAN_REGISTER); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | MAC_MII_CONTROL_RC) << 16)); return ret; } static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, u16 regAddr, u16 value, u32 phyAddr) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u8 scanWasEnabled; scanWasEnabled = ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, phyAddr | regAddr); 
ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); /* Wait for write to complete 9/10/04 SJP */ if (ql_wait_for_mii_ready(qdev)) { netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } if (scanWasEnabled) ql_mii_enable_scan_mode(qdev); return 0; } static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, u16 *value, u32 phyAddr) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u8 scanWasEnabled; u32 temp; scanWasEnabled = ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, phyAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16)); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); /* Wait for the read to complete */ if (ql_wait_for_mii_ready(qdev)) { netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); *value = (u16) temp; if (scanWasEnabled) ql_mii_enable_scan_mode(qdev); return 0; } static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, qdev->PHYAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); /* Wait for write to complete. */ if (ql_wait_for_mii_ready(qdev)) { netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } ql_mii_enable_scan_mode(qdev); return 0; } static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) { u32 temp; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; ql_mii_disable_scan_mode(qdev); if (ql_wait_for_mii_ready(qdev)) { netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, qdev->PHYAddr | regAddr); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16)); ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); /* Wait for the read to complete */ if (ql_wait_for_mii_ready(qdev)) { netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); return -1; } temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); *value = (u16) temp; ql_mii_enable_scan_mode(qdev); return 0; } static void ql_petbi_reset(struct ql3_adapter *qdev) { ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); } static void ql_petbi_start_neg(struct ql3_adapter *qdev) { u16 reg; /* Enable Auto-negotiation sense */ ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg); reg |= PETBI_TBI_AUTO_SENSE; ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); ql_mii_write_reg(qdev, PETBI_NEG_ADVER, PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); } static void ql_petbi_reset_ex(struct ql3_adapter *qdev) { ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, PHYAddr[qdev->mac_index]); } static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) { u16 reg; /* Enable Auto-negotiation sense */ ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, PHYAddr[qdev->mac_index]); reg |= PETBI_TBI_AUTO_SENSE; 
ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, PHYAddr[qdev->mac_index]); } static void ql_petbi_init(struct ql3_adapter *qdev) { ql_petbi_reset(qdev); ql_petbi_start_neg(qdev); } static void ql_petbi_init_ex(struct ql3_adapter *qdev) { ql_petbi_reset_ex(qdev); ql_petbi_start_neg_ex(qdev); } static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) { u16 reg; if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0) return 0; return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; } static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) { netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); /* power down device bit 11 = 1 */ ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); /* enable diagnostic mode bit 2 = 1 */ ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); /* 1000MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); /* 1000MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); /* 100MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); /* 100MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); /* 10MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); /* 10MB amplitude adjust (see Agere errata) */ ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); /* point to hidden reg 0x2806 */ ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); /* Write new PHYAD w/bit 5 set */ ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); /* * Disable diagnostic mode bit 2 = 0 * Power up device bit 11 = 0 * Link up (on) and activity (blink) */ ql_mii_write_reg(qdev, 0x12, 0x840a); ql_mii_write_reg(qdev, 0x00, 0x1140); ql_mii_write_reg(qdev, 0x1c, 0xfaf0); } static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, u16 phyIdReg0, u16 phyIdReg1) { enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; u32 oui; u16 model; int i; if (phyIdReg0 == 0xffff) return result; if (phyIdReg1 == 0xffff) return result; /* oui is split between two registers */ oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; /* Scan table for this PHY */ for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel)) { netdev_info(qdev->ndev, "Phy: %s\n", PHY_DEVICES[i].name); result = PHY_DEVICES[i].phyDevice; break; } } return result; } static int ql_phy_get_speed(struct ql3_adapter *qdev) { u16 reg; switch (qdev->phyType) { case PHY_AGERE_ET1011C: { if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) return 0; reg = (reg >> 8) & 3; break; } default: if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) return 0; reg = (((reg & 0x18) >> 3) & 3); } switch (reg) { case 2: return SPEED_1000; case 1: return SPEED_100; case 0: return SPEED_10; default: return -1; } } static int ql_is_full_dup(struct ql3_adapter *qdev) { u16 reg; switch (qdev->phyType) { case PHY_AGERE_ET1011C: { if (ql_mii_read_reg(qdev, 0x1A, &reg)) return 0; return ((reg & 0x0080) && (reg & 0x1000)) != 0; } case PHY_VITESSE_VSC8211: default: { if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) return 0; return (reg & PHY_AUX_DUPLEX_STAT) != 0; } } } static int 
ql_is_phy_neg_pause(struct ql3_adapter *qdev) { u16 reg; if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0) return 0; return (reg & PHY_NEG_PAUSE) != 0; } static int PHY_Setup(struct ql3_adapter *qdev) { u16 reg1; u16 reg2; bool agereAddrChangeNeeded = false; u32 miiAddr = 0; int err; /* Determine the PHY we are using by reading the ID's */ err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1); if (err != 0) { netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); return err; } err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2); if (err != 0) { netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); return err; } /* Check if we have a Agere PHY */ if ((reg1 == 0xffff) || (reg2 == 0xffff)) { /* Determine which MII address we should be using determined by the index of the card */ if (qdev->mac_index == 0) miiAddr = MII_AGERE_ADDR_1; else miiAddr = MII_AGERE_ADDR_2; err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); if (err != 0) { netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG after Agere detected\n"); return err; } err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); if (err != 0) { netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); return err; } /* We need to remember to initialize the Agere PHY */ agereAddrChangeNeeded = true; } /* Determine the particular PHY we have on board to apply PHY specific initializations */ qdev->phyType = getPhyType(qdev, reg1, reg2); if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { /* need this here so address gets changed */ phyAgereSpecificInit(qdev, miiAddr); } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { netdev_err(qdev->ndev, "PHY is unknown\n"); return -EIO; } return 0; } /* * Caller holds hw_lock. */ static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); else value = (MAC_CONFIG_REG_PE << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. */ static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); else value = (MAC_CONFIG_REG_SR << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. */ static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); else value = (MAC_CONFIG_REG_GM << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. */ static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); else value = (MAC_CONFIG_REG_FD << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. 
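* * Note on the write pattern used by ql_mac_enable() and the ql_mac_cfg_*() helpers (an observation from this code, not from a hardware manual): the upper 16 bits of these config registers appear to act as a write-enable mask for the corresponding lower bits, so writing (BIT | (BIT << 16)) sets BIT while writing (BIT << 16) alone clears it without disturbing the other bits.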
*/ static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; if (enable) value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); else value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); else ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); } /* * Caller holds hw_lock. */ static int ql_is_fiber(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; switch (qdev->mac_index) { case 0: bitToCheck = PORT_STATUS_SM0; break; case 1: bitToCheck = PORT_STATUS_SM1; break; } temp = ql_read_page0_reg(qdev, &port_regs->portStatus); return (temp & bitToCheck) != 0; } static int ql_is_auto_cfg(struct ql3_adapter *qdev) { u16 reg; ql_mii_read_reg(qdev, 0x00, &reg); return (reg & 0x1000) != 0; } /* * Caller holds hw_lock. */ static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; switch (qdev->mac_index) { case 0: bitToCheck = PORT_STATUS_AC0; break; case 1: bitToCheck = PORT_STATUS_AC1; break; } temp = ql_read_page0_reg(qdev, &port_regs->portStatus); if (temp & bitToCheck) { netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); return 1; } netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); return 0; } /* * ql_is_neg_pause() returns 1 if pause was negotiated to be on */ static int ql_is_neg_pause(struct ql3_adapter *qdev) { if (ql_is_fiber(qdev)) return ql_is_petbi_neg_pause(qdev); else return ql_is_phy_neg_pause(qdev); } static int ql_auto_neg_error(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; switch (qdev->mac_index) { case 0: bitToCheck = PORT_STATUS_AE0; break; case 1: bitToCheck = PORT_STATUS_AE1; break; } temp = ql_read_page0_reg(qdev, &port_regs->portStatus); return (temp & bitToCheck) != 0; } static u32 ql_get_link_speed(struct ql3_adapter *qdev) { if (ql_is_fiber(qdev)) return SPEED_1000; else return ql_phy_get_speed(qdev); } static int ql_is_link_full_dup(struct ql3_adapter *qdev) { if (ql_is_fiber(qdev)) return 1; else return ql_is_full_dup(qdev); } /* * Caller holds hw_lock. */ static int ql_link_down_detect(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; switch (qdev->mac_index) { case 0: bitToCheck = ISP_CONTROL_LINK_DN_0; break; case 1: bitToCheck = ISP_CONTROL_LINK_DN_1; break; } temp = ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); return (temp & bitToCheck) != 0; } /* * Caller holds hw_lock. */ static int ql_link_down_detect_clear(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; switch (qdev->mac_index) { case 0: ql_write_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus, (ISP_CONTROL_LINK_DN_0) | (ISP_CONTROL_LINK_DN_0 << 16)); break; case 1: ql_write_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus, (ISP_CONTROL_LINK_DN_1) | (ISP_CONTROL_LINK_DN_1 << 16)); break; default: return 1; } return 0; } /* * Caller holds hw_lock. 
*/ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp; switch (qdev->mac_index) { case 0: bitToCheck = PORT_STATUS_F1_ENABLED; break; case 1: bitToCheck = PORT_STATUS_F3_ENABLED; break; default: break; } temp = ql_read_page0_reg(qdev, &port_regs->portStatus); if (temp & bitToCheck) { netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "not link master\n"); return 0; } netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n"); return 1; } static void ql_phy_reset_ex(struct ql3_adapter *qdev) { ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, PHYAddr[qdev->mac_index]); } static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) { u16 reg; u16 portConfiguration; if (qdev->phyType == PHY_AGERE_ET1011C) ql_mii_write_reg(qdev, 0x13, 0x0000); /* turn off external loopback */ if (qdev->mac_index == 0) portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration; else portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration; /* Some HBA's in the field are set to 0 and they need to be reinterpreted with a default value */ if (portConfiguration == 0) portConfiguration = PORT_CONFIG_DEFAULT; /* Set the 1000 advertisements */ ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg, PHYAddr[qdev->mac_index]); reg &= ~PHY_GIG_ALL_PARAMS; if (portConfiguration & PORT_CONFIG_1000MB_SPEED) { if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) reg |= PHY_GIG_ADV_1000F; else reg |= PHY_GIG_ADV_1000H; } ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, PHYAddr[qdev->mac_index]); /* Set the 10/100 & pause negotiation advertisements */ ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg, PHYAddr[qdev->mac_index]); reg &= ~PHY_NEG_ALL_PARAMS; if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { if (portConfiguration & PORT_CONFIG_100MB_SPEED) reg |= PHY_NEG_ADV_100F; if (portConfiguration & PORT_CONFIG_10MB_SPEED) reg |= PHY_NEG_ADV_10F; } if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { if (portConfiguration & PORT_CONFIG_100MB_SPEED) reg |= PHY_NEG_ADV_100H; if (portConfiguration & PORT_CONFIG_10MB_SPEED) reg |= PHY_NEG_ADV_10H; } if (portConfiguration & PORT_CONFIG_1000MB_SPEED) reg |= 1; ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, PHYAddr[qdev->mac_index]); ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG, PHYAddr[qdev->mac_index]); } static void ql_phy_init_ex(struct ql3_adapter *qdev) { ql_phy_reset_ex(qdev); PHY_Setup(qdev); ql_phy_start_neg_ex(qdev); } /* * Caller holds hw_lock. 
*/ static u32 ql_get_link_state(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 bitToCheck = 0; u32 temp, linkState; switch (qdev->mac_index) { case 0: bitToCheck = PORT_STATUS_UP0; break; case 1: bitToCheck = PORT_STATUS_UP1; break; } temp = ql_read_page0_reg(qdev, &port_regs->portStatus); if (temp & bitToCheck) linkState = LS_UP; else linkState = LS_DOWN; return linkState; } static int ql_port_start(struct ql3_adapter *qdev) { if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); return -1; } if (ql_is_fiber(qdev)) { ql_petbi_init(qdev); } else { /* Copper port */ ql_phy_init_ex(qdev); } ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); return 0; } static int ql_finish_auto_neg(struct ql3_adapter *qdev) { if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) return -1; if (!ql_auto_neg_error(qdev)) { if (test_bit(QL_LINK_MASTER, &qdev->flags)) { /* configure the MAC */ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "Configuring link\n"); ql_mac_cfg_soft_reset(qdev, 1); ql_mac_cfg_gig(qdev, (ql_get_link_speed (qdev) == SPEED_1000)); ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup (qdev)); ql_mac_cfg_pause(qdev, ql_is_neg_pause (qdev)); ql_mac_cfg_soft_reset(qdev, 0); /* enable the MAC */ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "Enabling mac\n"); ql_mac_enable(qdev, 1); } qdev->port_link_state = LS_UP; netif_start_queue(qdev->ndev); netif_carrier_on(qdev->ndev); netif_info(qdev, link, qdev->ndev, "Link is up at %d Mbps, %s duplex\n", ql_get_link_speed(qdev), ql_is_link_full_dup(qdev) ? "full" : "half"); } else { /* Remote error detected */ if (test_bit(QL_LINK_MASTER, &qdev->flags)) { netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "Remote error detected. Calling ql_port_start()\n"); /* * ql_port_start() is shared code and needs * to lock the PHY on it's own. */ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); if (ql_port_start(qdev)) /* Restart port */ return -1; return 0; } } ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); return 0; } static void ql_link_state_machine_work(struct work_struct *work) { struct ql3_adapter *qdev = container_of(work, struct ql3_adapter, link_state_work.work); u32 curr_link_state; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); curr_link_state = ql_get_link_state(qdev); if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { netif_info(qdev, link, qdev->ndev, "Reset in progress, skip processing link state\n"); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); /* Restart timer on 2 second interval. */ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); return; } switch (qdev->port_link_state) { default: if (test_bit(QL_LINK_MASTER, &qdev->flags)) ql_port_start(qdev); qdev->port_link_state = LS_DOWN; fallthrough; case LS_DOWN: if (curr_link_state == LS_UP) { netif_info(qdev, link, qdev->ndev, "Link is up\n"); if (ql_is_auto_neg_complete(qdev)) ql_finish_auto_neg(qdev); if (qdev->port_link_state == LS_UP) ql_link_down_detect_clear(qdev); qdev->port_link_state = LS_UP; } break; case LS_UP: /* * See if the link is currently down or went down and came * back up */ if (curr_link_state == LS_DOWN) { netif_info(qdev, link, qdev->ndev, "Link is down\n"); qdev->port_link_state = LS_DOWN; } if (ql_link_down_detect(qdev)) qdev->port_link_state = LS_DOWN; break; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); /* Restart timer on 2 second interval. 
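(note: the mod_timer() call below actually re-arms the timer at jiffies + HZ * 1, i.e. one second out)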
*/ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); } /* * Caller must take hw_lock and QL_PHY_GIO_SEM. */ static void ql_get_phy_owner(struct ql3_adapter *qdev) { if (ql_this_adapter_controls_port(qdev)) set_bit(QL_LINK_MASTER, &qdev->flags); else clear_bit(QL_LINK_MASTER, &qdev->flags); } /* * Caller must take hw_lock and QL_PHY_GIO_SEM. */ static void ql_init_scan_mode(struct ql3_adapter *qdev) { ql_mii_enable_scan_mode(qdev); if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { if (ql_this_adapter_controls_port(qdev)) ql_petbi_init_ex(qdev); } else { if (ql_this_adapter_controls_port(qdev)) ql_phy_init_ex(qdev); } } /* * MII_Setup needs to be called before taking the PHY out of reset * so that the management interface clock speed can be set properly. * It would be better if we had a way to disable MDC until after the * PHY is out of reset, but we don't have that capability. */ static int ql_mii_setup(struct ql3_adapter *qdev) { u32 reg; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) return -1; if (qdev->device_id == QL3032_DEVICE_ID) ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 0x0f00000); /* Divide 125MHz clock by 28 to meet PHY timing requirements */ reg = MAC_MII_CONTROL_CLK_SEL_DIV28; ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); return 0; } #define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ SUPPORTED_FIBRE | \ SUPPORTED_Autoneg) #define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ SUPPORTED_10baseT_Full | \ SUPPORTED_100baseT_Half | \ SUPPORTED_100baseT_Full | \ SUPPORTED_1000baseT_Half | \ SUPPORTED_1000baseT_Full | \ SUPPORTED_Autoneg | \ SUPPORTED_TP) \ static u32 ql_supported_modes(struct ql3_adapter *qdev) { if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) return SUPPORTED_OPTICAL_MODES; return SUPPORTED_TP_MODES; } static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) { int status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } status = ql_is_auto_cfg(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return status; } static u32 ql_get_speed(struct ql3_adapter *qdev) { u32 status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } status = ql_get_link_speed(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return status; } static int ql_get_full_dup(struct ql3_adapter *qdev) { int status; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } status = ql_is_link_full_dup(qdev); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return status; } static int ql_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) { struct ql3_adapter *qdev = netdev_priv(ndev); u32 supported, advertising; supported = 
ql_supported_modes(qdev); if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { cmd->base.port = PORT_FIBRE; } else { cmd->base.port = PORT_TP; cmd->base.phy_address = qdev->PHYAddr; } advertising = ql_supported_modes(qdev); cmd->base.autoneg = ql_get_auto_cfg_status(qdev); cmd->base.speed = ql_get_speed(qdev); cmd->base.duplex = ql_get_full_dup(qdev); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static void ql_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *drvinfo) { struct ql3_adapter *qdev = netdev_priv(ndev); strscpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->version, ql3xxx_driver_version, sizeof(drvinfo->version)); strscpy(drvinfo->bus_info, pci_name(qdev->pdev), sizeof(drvinfo->bus_info)); } static u32 ql_get_msglevel(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); return qdev->msg_enable; } static void ql_set_msglevel(struct net_device *ndev, u32 value) { struct ql3_adapter *qdev = netdev_priv(ndev); qdev->msg_enable = value; } static void ql_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct ql3_adapter *qdev = netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 reg; if (qdev->mac_index == 0) reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); else reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); pause->autoneg = ql_get_auto_cfg_status(qdev); pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; } static const struct ethtool_ops ql3xxx_ethtool_ops = { .get_drvinfo = ql_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = ql_get_msglevel, .set_msglevel = ql_set_msglevel, .get_pauseparam = ql_get_pauseparam, .get_link_ksettings = ql_get_link_ksettings, }; static int ql_populate_free_queue(struct ql3_adapter *qdev) { struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; dma_addr_t map; int err; while (lrg_buf_cb) { if (!lrg_buf_cb->skb) { lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!lrg_buf_cb->skb)) { netdev_printk(KERN_DEBUG, qdev->ndev, "Failed netdev_alloc_skb()\n"); break; } else { /* * We save some space to copy the ethhdr from * first buffer */ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); map = dma_map_single(&qdev->pdev->dev, lrg_buf_cb->skb->data, qdev->lrg_buffer_len - QL_HEADER_SPACE, DMA_FROM_DEVICE); err = dma_mapping_error(&qdev->pdev->dev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; break; } lrg_buf_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = cpu_to_le32(MS_64BITS(map)); dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); --qdev->lrg_buf_skb_check; if (!qdev->lrg_buf_skb_check) return 1; } } lrg_buf_cb = lrg_buf_cb->next; } return 0; } /* * Caller holds hw_lock. 
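* * As implemented below, the producer index is advanced one step per eight released small buffers, and only while at least sixteen releases are pending, so eight or more releases always remain outstanding until the next pass.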
*/ static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if (qdev->small_buf_release_cnt >= 16) { while (qdev->small_buf_release_cnt >= 16) { qdev->small_buf_q_producer_index++; if (qdev->small_buf_q_producer_index == NUM_SBUFQ_ENTRIES) qdev->small_buf_q_producer_index = 0; qdev->small_buf_release_cnt -= 8; } wmb(); writel_relaxed(qdev->small_buf_q_producer_index, &port_regs->CommonRegs.rxSmallQProducerIndex); } } /* * Caller holds hw_lock. */ static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) { struct bufq_addr_element *lrg_buf_q_ele; int i; struct ql_rcv_buf_cb *lrg_buf_cb; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; if ((qdev->lrg_buf_free_count >= 8) && (qdev->lrg_buf_release_cnt >= 16)) { if (qdev->lrg_buf_skb_check) if (!ql_populate_free_queue(qdev)) return; lrg_buf_q_ele = qdev->lrg_buf_next_free; while ((qdev->lrg_buf_release_cnt >= 16) && (qdev->lrg_buf_free_count >= 8)) { for (i = 0; i < 8; i++) { lrg_buf_cb = ql_get_from_lrg_buf_free_list(qdev); lrg_buf_q_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; lrg_buf_q_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; lrg_buf_q_ele++; qdev->lrg_buf_release_cnt--; } qdev->lrg_buf_q_producer_index++; if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries) qdev->lrg_buf_q_producer_index = 0; if (qdev->lrg_buf_q_producer_index == (qdev->num_lbufq_entries - 1)) { lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; } } wmb(); qdev->lrg_buf_next_free = lrg_buf_q_ele; writel(qdev->lrg_buf_q_producer_index, &port_regs->CommonRegs.rxLargeQProducerIndex); } } static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, struct ob_mac_iocb_rsp *mac_rsp) { struct ql_tx_buf_cb *tx_cb; int i; if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { netdev_warn(qdev->ndev, "Frame too short but it was padded and sent\n"); } tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; /* Check the transmit response flags for any errors */ if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { netdev_err(qdev->ndev, "Frame too short to be legal, frame not sent\n"); qdev->ndev->stats.tx_errors++; goto frame_not_sent; } if (tx_cb->seg_count == 0) { netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); qdev->ndev->stats.tx_errors++; goto invalid_seg_count; } dma_unmap_single(&qdev->pdev->dev, dma_unmap_addr(&tx_cb->map[0], mapaddr), dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE); tx_cb->seg_count--; if (tx_cb->seg_count) { for (i = 1; i < tx_cb->seg_count; i++) { dma_unmap_page(&qdev->pdev->dev, dma_unmap_addr(&tx_cb->map[i], mapaddr), dma_unmap_len(&tx_cb->map[i], maplen), DMA_TO_DEVICE); } } qdev->ndev->stats.tx_packets++; qdev->ndev->stats.tx_bytes += tx_cb->skb->len; frame_not_sent: dev_kfree_skb_irq(tx_cb->skb); tx_cb->skb = NULL; invalid_seg_count: atomic_inc(&qdev->tx_count); } static void ql_get_sbuf(struct ql3_adapter *qdev) { if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) qdev->small_buf_index = 0; qdev->small_buf_release_cnt++; } static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) { struct ql_rcv_buf_cb *lrg_buf_cb = NULL; lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; qdev->lrg_buf_release_cnt++; if (++qdev->lrg_buf_index == qdev->num_large_buffers) qdev->lrg_buf_index = 0; return lrg_buf_cb; } /* * The difference between 3022 and 3032 for inbound completions: * 3022 uses two buffers per completion. 
The first buffer contains * (some) header info, the second the remainder of the headers plus * the data. For this chip we reserve some space at the top of the * receive buffer so that the header info in buffer one can be * prepended to the buffer two. Buffer two is the sent up while * buffer one is returned to the hardware to be reused. * 3032 receives all of it's data and headers in one buffer for a * simpler process. 3032 also supports checksum verification as * can be seen in ql_process_macip_rx_intr(). */ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) { struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; struct sk_buff *skb; u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); /* * Get the inbound address list (small buffer). */ ql_get_sbuf(qdev); if (qdev->device_id == QL3022_DEVICE_ID) lrg_buf_cb1 = ql_get_lbuf(qdev); /* start of second buffer */ lrg_buf_cb2 = ql_get_lbuf(qdev); skb = lrg_buf_cb2->skb; qdev->ndev->stats.rx_packets++; qdev->ndev->stats.rx_bytes += length; skb_put(skb, length); dma_unmap_single(&qdev->pdev->dev, dma_unmap_addr(lrg_buf_cb2, mapaddr), dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE); prefetch(skb->data); skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, qdev->ndev); napi_gro_receive(&qdev->napi, skb); lrg_buf_cb2->skb = NULL; if (qdev->device_id == QL3022_DEVICE_ID) ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); } static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) { struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; struct sk_buff *skb1 = NULL, *skb2; struct net_device *ndev = qdev->ndev; u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); u16 size = 0; /* * Get the inbound address list (small buffer). */ ql_get_sbuf(qdev); if (qdev->device_id == QL3022_DEVICE_ID) { /* start of first buffer on 3022 */ lrg_buf_cb1 = ql_get_lbuf(qdev); skb1 = lrg_buf_cb1->skb; size = ETH_HLEN; if (*((u16 *) skb1->data) != 0xFFFF) size += VLAN_ETH_HLEN - ETH_HLEN; } /* start of second buffer */ lrg_buf_cb2 = ql_get_lbuf(qdev); skb2 = lrg_buf_cb2->skb; skb_put(skb2, length); /* Just the second buffer length here. */ dma_unmap_single(&qdev->pdev->dev, dma_unmap_addr(lrg_buf_cb2, mapaddr), dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE); prefetch(skb2->data); skb_checksum_none_assert(skb2); if (qdev->device_id == QL3022_DEVICE_ID) { /* * Copy the ethhdr from first buffer to second. This * is necessary for 3022 IP completions. */ skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, skb_push(skb2, size), size); } else { u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); if (checksum & (IB_IP_IOCB_RSP_3032_ICE | IB_IP_IOCB_RSP_3032_CE)) { netdev_err(ndev, "%s: Bad checksum for this %s packet, checksum = %x\n", __func__, ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 
"TCP" : "UDP"), checksum); } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || (checksum & IB_IP_IOCB_RSP_3032_UDP && !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { skb2->ip_summed = CHECKSUM_UNNECESSARY; } } skb2->protocol = eth_type_trans(skb2, qdev->ndev); napi_gro_receive(&qdev->napi, skb2); ndev->stats.rx_packets++; ndev->stats.rx_bytes += length; lrg_buf_cb2->skb = NULL; if (qdev->device_id == QL3022_DEVICE_ID) ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); } static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget) { struct net_rsp_iocb *net_rsp; struct net_device *ndev = qdev->ndev; int work_done = 0; /* While there are entries in the completion queue. */ while ((le32_to_cpu(*(qdev->prsp_producer_index)) != qdev->rsp_consumer_index) && (work_done < budget)) { net_rsp = qdev->rsp_current; rmb(); /* * Fix 4032 chip's undocumented "feature" where bit-8 is set * if the inbound completion is for a VLAN. */ if (qdev->device_id == QL3032_DEVICE_ID) net_rsp->opcode &= 0x7f; switch (net_rsp->opcode) { case OPCODE_OB_MAC_IOCB_FN0: case OPCODE_OB_MAC_IOCB_FN2: ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) net_rsp); break; case OPCODE_IB_MAC_IOCB: case OPCODE_IB_3032_MAC_IOCB: ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) net_rsp); work_done++; break; case OPCODE_IB_IP_IOCB: case OPCODE_IB_3032_IP_IOCB: ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) net_rsp); work_done++; break; default: { u32 *tmp = (u32 *)net_rsp; netdev_err(ndev, "Hit default case, not handled!\n" " dropping the packet, opcode = %x\n" "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", net_rsp->opcode, (unsigned long int)tmp[0], (unsigned long int)tmp[1], (unsigned long int)tmp[2], (unsigned long int)tmp[3]); } } qdev->rsp_consumer_index++; if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { qdev->rsp_consumer_index = 0; qdev->rsp_current = qdev->rsp_q_virt_addr; } else { qdev->rsp_current++; } } return work_done; } static int ql_poll(struct napi_struct *napi, int budget) { struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; int work_done; work_done = ql_tx_rx_clean(qdev, budget); if (work_done < budget && napi_complete_done(napi, work_done)) { unsigned long flags; spin_lock_irqsave(&qdev->hw_lock, flags); ql_update_small_bufq_prod_index(qdev); ql_update_lrg_bufq_prod_index(qdev); writel(qdev->rsp_consumer_index, &port_regs->CommonRegs.rspQConsumerIndex); spin_unlock_irqrestore(&qdev->hw_lock, flags); ql_enable_interrupts(qdev); } return work_done; } static irqreturn_t ql3xxx_isr(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct ql3_adapter *qdev = netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; int handled = 1; u32 var; value = ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { spin_lock(&qdev->adapter_lock); netif_stop_queue(qdev->ndev); netif_carrier_off(qdev->ndev); ql_disable_interrupts(qdev); qdev->port_link_state = LS_DOWN; set_bit(QL_RESET_ACTIVE, &qdev->flags) ; if (value & ISP_CONTROL_FE) { /* * Chip Fatal Error. */ var = ql_read_page0_reg_l(qdev, &port_regs->PortFatalErrStatus); netdev_warn(ndev, "Resetting chip. PortFatalErrStatus register = 0x%x\n", var); set_bit(QL_RESET_START, &qdev->flags) ; } else { /* * Soft Reset Requested. 
*/ set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; netdev_err(ndev, "Another function issued a reset to the chip. ISR value = %x\n", value); } queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); spin_unlock(&qdev->adapter_lock); } else if (value & ISP_IMR_DISABLE_CMPL_INT) { ql_disable_interrupts(qdev); if (likely(napi_schedule_prep(&qdev->napi))) __napi_schedule(&qdev->napi); } else return IRQ_NONE; return IRQ_RETVAL(handled); } /* * Get the total number of segments needed for the given number of fragments. * This is necessary because outbound address lists (OAL) will be used when * more than two frags are given. Each address list has 5 addr/len pairs. * The 5th pair in each OAL is used to point to the next OAL if more frags * are coming. That is why the frags:segment count ratio is not linear. */ static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) { if (qdev->device_id == QL3022_DEVICE_ID) return 1; if (frags <= 2) return frags + 1; else if (frags <= 6) return frags + 2; else if (frags <= 10) return frags + 3; else if (frags <= 14) return frags + 4; else if (frags <= 18) return frags + 5; return -1; } static void ql_hw_csum_setup(const struct sk_buff *skb, struct ob_mac_iocb_req *mac_iocb_ptr) { const struct iphdr *ip = ip_hdr(skb); mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); mac_iocb_ptr->ip_hdr_len = ip->ihl; if (ip->protocol == IPPROTO_TCP) { mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | OB_3032MAC_IOCB_REQ_IC; } else { mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | OB_3032MAC_IOCB_REQ_IC; } } /* * Map the buffers for this transmit. * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. */ static int ql_send_map(struct ql3_adapter *qdev, struct ob_mac_iocb_req *mac_iocb_ptr, struct ql_tx_buf_cb *tx_cb, struct sk_buff *skb) { struct oal *oal; struct oal_entry *oal_entry; int len = skb_headlen(skb); dma_addr_t map; int err; int completed_segs, i; int seg_cnt, seg = 0; int frag_cnt = (int)skb_shinfo(skb)->nr_frags; seg_cnt = tx_cb->seg_count; /* * Map the skb buffer first. */ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE); err = dma_mapping_error(&qdev->pdev->dev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); return NETDEV_TX_BUSY; } oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(len); dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); dma_unmap_len_set(&tx_cb->map[seg], maplen, len); seg++; if (seg_cnt == 1) { /* Terminate the last segment. */ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); return NETDEV_TX_OK; } oal = tx_cb->oal; for (completed_segs = 0; completed_segs < frag_cnt; completed_segs++, seg++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; oal_entry++; /* * Check for continuation requirements. * It's strange but necessary. * Continuation entry points to outbound address list. 
*/ if ((seg == 2 && seg_cnt > 3) || (seg == 7 && seg_cnt > 8) || (seg == 12 && seg_cnt > 13) || (seg == 17 && seg_cnt > 18)) { map = dma_map_single(&qdev->pdev->dev, oal, sizeof(struct oal), DMA_TO_DEVICE); err = dma_mapping_error(&qdev->pdev->dev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping outbound address list with error: %d\n", err); goto map_error; } oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(sizeof(struct oal) | OAL_CONT_ENTRY); dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); dma_unmap_len_set(&tx_cb->map[seg], maplen, sizeof(struct oal)); oal_entry = (struct oal_entry *)oal; oal++; seg++; } map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); err = dma_mapping_error(&qdev->pdev->dev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping frags failed with error: %d\n", err); goto map_error; } oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); oal_entry->len = cpu_to_le32(skb_frag_size(frag)); dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); } /* Terminate the last segment. */ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); return NETDEV_TX_OK; map_error: /* A PCI mapping failed, so we need to back out. Traverse the OALs and * associated pages that have been mapped and unmap them to clean up * properly. */ seg = 1; oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal = tx_cb->oal; for (i = 0; i < completed_segs; i++, seg++) { oal_entry++; /* * Check for continuation requirements. * It's strange but necessary. */ if ((seg == 2 && seg_cnt > 3) || (seg == 7 && seg_cnt > 8) || (seg == 12 && seg_cnt > 13) || (seg == 17 && seg_cnt > 18)) { dma_unmap_single(&qdev->pdev->dev, dma_unmap_addr(&tx_cb->map[seg], mapaddr), dma_unmap_len(&tx_cb->map[seg], maplen), DMA_TO_DEVICE); oal++; seg++; } dma_unmap_page(&qdev->pdev->dev, dma_unmap_addr(&tx_cb->map[seg], mapaddr), dma_unmap_len(&tx_cb->map[seg], maplen), DMA_TO_DEVICE); } dma_unmap_single(&qdev->pdev->dev, dma_unmap_addr(&tx_cb->map[0], mapaddr), dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE); return NETDEV_TX_BUSY; } /* * The difference between 3022 and 3032 sends: * 3022 only supports a simple single segment transmission. * 3032 supports checksumming and scatter/gather lists (fragments). * The 3032 supports sglists by using the 3 addr/len pairs (ALP) * in the IOCB plus a chain of outbound address lists (OAL) that * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) * will be used to point to an OAL when more ALP entries are required. * The IOCB is always the top of the chain followed by one or more * OALs (when necessary). 
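* * Illustrative walk-through (derived from ql_get_seg_count() and the continuation checks in ql_send_map(), not part of the original comment): an skb with 7 fragments has 8 data pieces (linear data plus 7 frags); the IOCB carries the first two, its 3rd ALP chains to OAL #1, which carries four more and chains from its 5th ALP to OAL #2 for the last two, giving 8 data ALPs plus 2 continuation ALPs = 10 segments, matching the frags + 3 case (frags <= 10) in ql_get_seg_count().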
*/ static netdev_tx_t ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; struct ql_tx_buf_cb *tx_cb; u32 tot_len = skb->len; struct ob_mac_iocb_req *mac_iocb_ptr; if (unlikely(atomic_read(&qdev->tx_count) < 2)) return NETDEV_TX_BUSY; tx_cb = &qdev->tx_buf[qdev->req_producer_index]; tx_cb->seg_count = ql_get_seg_count(qdev, skb_shinfo(skb)->nr_frags); if (tx_cb->seg_count == -1) { netdev_err(ndev, "%s: invalid segment count!\n", __func__); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } mac_iocb_ptr = tx_cb->queue_entry; memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); mac_iocb_ptr->opcode = qdev->mac_ob_opcode; mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; mac_iocb_ptr->flags |= qdev->mb_bit_mask; mac_iocb_ptr->transaction_id = qdev->req_producer_index; mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); tx_cb->skb = skb; if (qdev->device_id == QL3032_DEVICE_ID && skb->ip_summed == CHECKSUM_PARTIAL) ql_hw_csum_setup(skb, mac_iocb_ptr); if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { netdev_err(ndev, "%s: Could not map the segments!\n", __func__); return NETDEV_TX_BUSY; } wmb(); qdev->req_producer_index++; if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) qdev->req_producer_index = 0; wmb(); ql_write_common_reg_l(qdev, &port_regs->CommonRegs.reqQProducerIndex, qdev->req_producer_index); netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, "tx queued, slot %d, len %d\n", qdev->req_producer_index, skb->len); atomic_dec(&qdev->tx_count); return NETDEV_TX_OK; } static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) { qdev->req_q_size = (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); /* The barrier is required to ensure request and response queue * addr writes to the registers. 
*/ wmb(); qdev->req_q_virt_addr = dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size, &qdev->req_q_phy_addr, GFP_KERNEL); if ((qdev->req_q_virt_addr == NULL) || LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { netdev_err(qdev->ndev, "reqQ failed\n"); return -ENOMEM; } qdev->rsp_q_virt_addr = dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size, &qdev->rsp_q_phy_addr, GFP_KERNEL); if ((qdev->rsp_q_virt_addr == NULL) || LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { netdev_err(qdev->ndev, "rspQ allocation failed\n"); dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size, qdev->req_q_virt_addr, qdev->req_q_phy_addr); return -ENOMEM; } set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); return 0; } static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) { if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { netdev_info(qdev->ndev, "Already done\n"); return; } dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size, qdev->req_q_virt_addr, qdev->req_q_phy_addr); qdev->req_q_virt_addr = NULL; dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size, qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); qdev->rsp_q_virt_addr = NULL; clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); } static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) { /* Create Large Buffer Queue */ qdev->lrg_buf_q_size = qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); if (qdev->lrg_buf_q_size < PAGE_SIZE) qdev->lrg_buf_q_alloc_size = PAGE_SIZE; else qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, sizeof(struct ql_rcv_buf_cb), GFP_KERNEL); if (qdev->lrg_buf == NULL) return -ENOMEM; qdev->lrg_buf_q_alloc_virt_addr = dma_alloc_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size, &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL); if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { netdev_err(qdev->ndev, "lBufQ failed\n"); return -ENOMEM; } qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; /* Create Small Buffer Queue */ qdev->small_buf_q_size = NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); if (qdev->small_buf_q_size < PAGE_SIZE) qdev->small_buf_q_alloc_size = PAGE_SIZE; else qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; qdev->small_buf_q_alloc_virt_addr = dma_alloc_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size, &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL); if (qdev->small_buf_q_alloc_virt_addr == NULL) { netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); return -ENOMEM; } qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); return 0; } static void ql_free_buffer_queues(struct ql3_adapter *qdev) { if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { netdev_info(qdev->ndev, "Already done\n"); return; } kfree(qdev->lrg_buf); dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); qdev->lrg_buf_q_virt_addr = NULL; dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size, qdev->small_buf_q_alloc_virt_addr, qdev->small_buf_q_alloc_phy_addr); qdev->small_buf_q_virt_addr = NULL; clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); } static int ql_alloc_small_buffers(struct ql3_adapter *qdev) { 
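/* * Layout sketch (restating the code below): a single coherent DMA block of QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * QL_SMALL_BUFFER_SIZE bytes is allocated, and each small buffer queue address element is pointed at the next consecutive QL_SMALL_BUFFER_SIZE slice of that block. */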
int i; struct bufq_addr_element *small_buf_q_entry; /* Currently we allocate on one of memory and use it for smallbuffers */ qdev->small_buf_total_size = (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * QL_SMALL_BUFFER_SIZE); qdev->small_buf_virt_addr = dma_alloc_coherent(&qdev->pdev->dev, qdev->small_buf_total_size, &qdev->small_buf_phy_addr, GFP_KERNEL); if (qdev->small_buf_virt_addr == NULL) { netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); return -ENOMEM; } qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); small_buf_q_entry = qdev->small_buf_q_virt_addr; /* Initialize the small buffer queue. */ for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { small_buf_q_entry->addr_high = cpu_to_le32(qdev->small_buf_phy_addr_high); small_buf_q_entry->addr_low = cpu_to_le32(qdev->small_buf_phy_addr_low + (i * QL_SMALL_BUFFER_SIZE)); small_buf_q_entry++; } qdev->small_buf_index = 0; set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); return 0; } static void ql_free_small_buffers(struct ql3_adapter *qdev) { if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { netdev_info(qdev->ndev, "Already done\n"); return; } if (qdev->small_buf_virt_addr != NULL) { dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_total_size, qdev->small_buf_virt_addr, qdev->small_buf_phy_addr); qdev->small_buf_virt_addr = NULL; } } static void ql_free_large_buffers(struct ql3_adapter *qdev) { int i = 0; struct ql_rcv_buf_cb *lrg_buf_cb; for (i = 0; i < qdev->num_large_buffers; i++) { lrg_buf_cb = &qdev->lrg_buf[i]; if (lrg_buf_cb->skb) { dev_kfree_skb(lrg_buf_cb->skb); dma_unmap_single(&qdev->pdev->dev, dma_unmap_addr(lrg_buf_cb, mapaddr), dma_unmap_len(lrg_buf_cb, maplen), DMA_FROM_DEVICE); memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); } else { break; } } } static void ql_init_large_buffers(struct ql3_adapter *qdev) { int i; struct ql_rcv_buf_cb *lrg_buf_cb; struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; for (i = 0; i < qdev->num_large_buffers; i++) { lrg_buf_cb = &qdev->lrg_buf[i]; buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; buf_addr_ele++; } qdev->lrg_buf_index = 0; qdev->lrg_buf_skb_check = 0; } static int ql_alloc_large_buffers(struct ql3_adapter *qdev) { int i; struct ql_rcv_buf_cb *lrg_buf_cb; struct sk_buff *skb; dma_addr_t map; int err; for (i = 0; i < qdev->num_large_buffers; i++) { lrg_buf_cb = &qdev->lrg_buf[i]; memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); skb = netdev_alloc_skb(qdev->ndev, qdev->lrg_buffer_len); if (unlikely(!skb)) { /* Better luck next round */ netdev_err(qdev->ndev, "large buff alloc failed for %d bytes at index %d\n", qdev->lrg_buffer_len * 2, i); ql_free_large_buffers(qdev); return -ENOMEM; } else { lrg_buf_cb->index = i; /* * We save some space to copy the ethhdr from first * buffer */ skb_reserve(skb, QL_HEADER_SPACE); map = dma_map_single(&qdev->pdev->dev, skb->data, qdev->lrg_buffer_len - QL_HEADER_SPACE, DMA_FROM_DEVICE); err = dma_mapping_error(&qdev->pdev->dev, map); if (err) { netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", err); dev_kfree_skb_irq(skb); ql_free_large_buffers(qdev); return -ENOMEM; } lrg_buf_cb->skb = skb; dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); dma_unmap_len_set(lrg_buf_cb, maplen, qdev->lrg_buffer_len - QL_HEADER_SPACE); lrg_buf_cb->buf_phy_addr_low = cpu_to_le32(LS_64BITS(map)); lrg_buf_cb->buf_phy_addr_high = 
cpu_to_le32(MS_64BITS(map)); } } return 0; } static void ql_free_send_free_list(struct ql3_adapter *qdev) { struct ql_tx_buf_cb *tx_cb; int i; tx_cb = &qdev->tx_buf[0]; for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { kfree(tx_cb->oal); tx_cb->oal = NULL; tx_cb++; } } static int ql_create_send_free_list(struct ql3_adapter *qdev) { struct ql_tx_buf_cb *tx_cb; int i; struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; /* Create free list of transmit buffers */ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { tx_cb = &qdev->tx_buf[i]; tx_cb->skb = NULL; tx_cb->queue_entry = req_q_curr; req_q_curr++; tx_cb->oal = kmalloc(512, GFP_KERNEL); if (tx_cb->oal == NULL) return -ENOMEM; } return 0; } static int ql_alloc_mem_resources(struct ql3_adapter *qdev) { if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; qdev->lrg_buffer_len = NORMAL_MTU_SIZE; } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { /* * Bigger buffers, so less of them. */ qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; qdev->lrg_buffer_len = JUMBO_MTU_SIZE; } else { netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); return -ENOMEM; } qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; qdev->max_frame_size = (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; /* * First allocate a page of shared memory and use it for shadow * locations of Network Request Queue Consumer Address Register and * Network Completion Queue Producer Index Register */ qdev->shadow_reg_virt_addr = dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE, &qdev->shadow_reg_phy_addr, GFP_KERNEL); if (qdev->shadow_reg_virt_addr != NULL) { qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; qdev->req_consumer_index_phy_addr_high = MS_64BITS(qdev->shadow_reg_phy_addr); qdev->req_consumer_index_phy_addr_low = LS_64BITS(qdev->shadow_reg_phy_addr); qdev->prsp_producer_index = (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); qdev->rsp_producer_index_phy_addr_high = qdev->req_consumer_index_phy_addr_high; qdev->rsp_producer_index_phy_addr_low = qdev->req_consumer_index_phy_addr_low + 8; } else { netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); return -ENOMEM; } if (ql_alloc_net_req_rsp_queues(qdev) != 0) { netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); goto err_req_rsp; } if (ql_alloc_buffer_queues(qdev) != 0) { netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); goto err_buffer_queues; } if (ql_alloc_small_buffers(qdev) != 0) { netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); goto err_small_buffers; } if (ql_alloc_large_buffers(qdev) != 0) { netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); goto err_small_buffers; } /* Initialize the large buffer queue. 
*/ ql_init_large_buffers(qdev); if (ql_create_send_free_list(qdev)) goto err_free_list; qdev->rsp_current = qdev->rsp_q_virt_addr; return 0; err_free_list: ql_free_send_free_list(qdev); err_small_buffers: ql_free_buffer_queues(qdev); err_buffer_queues: ql_free_net_req_rsp_queues(qdev); err_req_rsp: dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE, qdev->shadow_reg_virt_addr, qdev->shadow_reg_phy_addr); return -ENOMEM; } static void ql_free_mem_resources(struct ql3_adapter *qdev) { ql_free_send_free_list(qdev); ql_free_large_buffers(qdev); ql_free_small_buffers(qdev); ql_free_buffer_queues(qdev); ql_free_net_req_rsp_queues(qdev); if (qdev->shadow_reg_virt_addr != NULL) { dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE, qdev->shadow_reg_virt_addr, qdev->shadow_reg_phy_addr); qdev->shadow_reg_virt_addr = NULL; } } static int ql_init_misc_registers(struct ql3_adapter *qdev) { struct ql3xxx_local_ram_registers __iomem *local_ram = (void __iomem *)qdev->mem_map_registers; if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 4)) return -1; ql_write_page2_reg(qdev, &local_ram->bufletSize, qdev->nvram_data.bufletSize); ql_write_page2_reg(qdev, &local_ram->maxBufletCount, qdev->nvram_data.bufletCount); ql_write_page2_reg(qdev, &local_ram->freeBufletThresholdLow, (qdev->nvram_data.tcpWindowThreshold25 << 16) | (qdev->nvram_data.tcpWindowThreshold0)); ql_write_page2_reg(qdev, &local_ram->freeBufletThresholdHigh, qdev->nvram_data.tcpWindowThreshold50); ql_write_page2_reg(qdev, &local_ram->ipHashTableBase, (qdev->nvram_data.ipHashTableBaseHi << 16) | qdev->nvram_data.ipHashTableBaseLo); ql_write_page2_reg(qdev, &local_ram->ipHashTableCount, qdev->nvram_data.ipHashTableSize); ql_write_page2_reg(qdev, &local_ram->tcpHashTableBase, (qdev->nvram_data.tcpHashTableBaseHi << 16) | qdev->nvram_data.tcpHashTableBaseLo); ql_write_page2_reg(qdev, &local_ram->tcpHashTableCount, qdev->nvram_data.tcpHashTableSize); ql_write_page2_reg(qdev, &local_ram->ncbBase, (qdev->nvram_data.ncbTableBaseHi << 16) | qdev->nvram_data.ncbTableBaseLo); ql_write_page2_reg(qdev, &local_ram->maxNcbCount, qdev->nvram_data.ncbTableSize); ql_write_page2_reg(qdev, &local_ram->drbBase, (qdev->nvram_data.drbTableBaseHi << 16) | qdev->nvram_data.drbTableBaseLo); ql_write_page2_reg(qdev, &local_ram->maxDrbCount, qdev->nvram_data.drbTableSize); ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); return 0; } static int ql_adapter_initialize(struct ql3_adapter *qdev) { u32 value; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; struct ql3xxx_host_memory_registers __iomem *hmem_regs = (void __iomem *)port_regs; u32 delay = 10; int status = 0; if (ql_mii_setup(qdev)) return -1; /* Bring out PHY out of reset */ ql_write_common_reg(qdev, spir, (ISP_SERIAL_PORT_IF_WE | (ISP_SERIAL_PORT_IF_WE << 16))); /* Give the PHY time to come out of reset. */ mdelay(100); qdev->port_link_state = LS_DOWN; netif_carrier_off(qdev->ndev); /* V2 chip fix for ARS-39168. 
*/ ql_write_common_reg(qdev, spir, (ISP_SERIAL_PORT_IF_SDE | (ISP_SERIAL_PORT_IF_SDE << 16))); /* Request Queue Registers */ *((u32 *)(qdev->preq_consumer_index)) = 0; atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); qdev->req_producer_index = 0; ql_write_page1_reg(qdev, &hmem_regs->reqConsumerIndexAddrHigh, qdev->req_consumer_index_phy_addr_high); ql_write_page1_reg(qdev, &hmem_regs->reqConsumerIndexAddrLow, qdev->req_consumer_index_phy_addr_low); ql_write_page1_reg(qdev, &hmem_regs->reqBaseAddrHigh, MS_64BITS(qdev->req_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->reqBaseAddrLow, LS_64BITS(qdev->req_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); /* Response Queue Registers */ *((__le16 *) (qdev->prsp_producer_index)) = 0; qdev->rsp_consumer_index = 0; qdev->rsp_current = qdev->rsp_q_virt_addr; ql_write_page1_reg(qdev, &hmem_regs->rspProducerIndexAddrHigh, qdev->rsp_producer_index_phy_addr_high); ql_write_page1_reg(qdev, &hmem_regs->rspProducerIndexAddrLow, qdev->rsp_producer_index_phy_addr_low); ql_write_page1_reg(qdev, &hmem_regs->rspBaseAddrHigh, MS_64BITS(qdev->rsp_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rspBaseAddrLow, LS_64BITS(qdev->rsp_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); /* Large Buffer Queue */ ql_write_page1_reg(qdev, &hmem_regs->rxLargeQBaseAddrHigh, MS_64BITS(qdev->lrg_buf_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rxLargeQBaseAddrLow, LS_64BITS(qdev->lrg_buf_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries); ql_write_page1_reg(qdev, &hmem_regs->rxLargeBufferLength, qdev->lrg_buffer_len); /* Small Buffer Queue */ ql_write_page1_reg(qdev, &hmem_regs->rxSmallQBaseAddrHigh, MS_64BITS(qdev->small_buf_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rxSmallQBaseAddrLow, LS_64BITS(qdev->small_buf_q_phy_addr)); ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); ql_write_page1_reg(qdev, &hmem_regs->rxSmallBufferLength, QL_SMALL_BUFFER_SIZE); qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; qdev->small_buf_release_cnt = 8; qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; qdev->lrg_buf_release_cnt = 8; qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; qdev->small_buf_index = 0; qdev->lrg_buf_index = 0; qdev->lrg_buf_free_count = 0; qdev->lrg_buf_free_head = NULL; qdev->lrg_buf_free_tail = NULL; ql_write_common_reg(qdev, &port_regs->CommonRegs. rxSmallQProducerIndex, qdev->small_buf_q_producer_index); ql_write_common_reg(qdev, &port_regs->CommonRegs. rxLargeQProducerIndex, qdev->lrg_buf_q_producer_index); /* * Find out if the chip has already been initialized. If it has, then * we skip some of the initialization. */ clear_bit(QL_LINK_MASTER, &qdev->flags); value = ql_read_page0_reg(qdev, &port_regs->portStatus); if ((value & PORT_STATUS_IC) == 0) { /* Chip has not been configured yet, so let it rip. 
*/ if (ql_init_misc_registers(qdev)) { status = -1; goto out; } value = qdev->nvram_data.tcpMaxWindowSize; ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 13)) { status = -1; goto out; } ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 16) | (INTERNAL_CHIP_SD | INTERNAL_CHIP_WE))); ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); } if (qdev->mac_index) ql_write_page0_reg(qdev, &port_regs->mac1MaxFrameLengthReg, qdev->max_frame_size); else ql_write_page0_reg(qdev, &port_regs->mac0MaxFrameLengthReg, qdev->max_frame_size); if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2) << 7)) { status = -1; goto out; } PHY_Setup(qdev); ql_init_scan_mode(qdev); ql_get_phy_owner(qdev); /* Load the MAC Configuration */ /* Program lower 32 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((qdev->ndev->dev_addr[2] << 24) | (qdev->ndev->dev_addr[3] << 16) | (qdev->ndev->dev_addr[4] << 8) | qdev->ndev->dev_addr[5])); /* Program top 16 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((qdev->ndev->dev_addr[0] << 8) | qdev->ndev->dev_addr[1])); /* Enable Primary MAC */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | MAC_ADDR_INDIRECT_PTR_REG_PE)); /* Clear Primary and Secondary IP addresses */ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, ((IP_ADDR_INDEX_REG_MASK << 16) | (qdev->mac_index << 2))); ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, ((IP_ADDR_INDEX_REG_MASK << 16) | ((qdev->mac_index << 2) + 1))); ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); /* Indicate Configuration Complete */ ql_write_page0_reg(qdev, &port_regs->portControl, ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); do { value = ql_read_page0_reg(qdev, &port_regs->portStatus); if (value & PORT_STATUS_IC) break; spin_unlock_irq(&qdev->hw_lock); msleep(500); spin_lock_irq(&qdev->hw_lock); } while (--delay); if (delay == 0) { netdev_err(qdev->ndev, "Hw Initialization timeout\n"); status = -1; goto out; } /* Enable Ethernet Function */ if (qdev->device_id == QL3032_DEVICE_ID) { value = (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | QL3032_PORT_CONTROL_ET); ql_write_page0_reg(qdev, &port_regs->functionControl, ((value << 16) | value)); } else { value = (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | PORT_CONTROL_HH); ql_write_page0_reg(qdev, &port_regs->portControl, ((value << 16) | value)); } out: return status; } /* * Caller holds hw_lock. */ static int ql_adapter_reset(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; int status = 0; u16 value; int max_wait_time; set_bit(QL_RESET_ACTIVE, &qdev->flags); clear_bit(QL_RESET_DONE, &qdev->flags); /* * Issue soft reset to chip. 
*/ netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); ql_write_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus, ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); /* Wait 3 seconds for reset to complete. */ netdev_printk(KERN_DEBUG, qdev->ndev, "Wait 10 milliseconds for reset to complete\n"); /* Wait until the firmware tells us the Soft Reset is done */ max_wait_time = 5; do { value = ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); if ((value & ISP_CONTROL_SR) == 0) break; mdelay(1000); } while ((--max_wait_time)); /* * Also, make sure that the Network Reset Interrupt bit has been * cleared after the soft reset has taken place. */ value = ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); if (value & ISP_CONTROL_RI) { netdev_printk(KERN_DEBUG, qdev->ndev, "clearing RI after reset\n"); ql_write_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus, ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); } if (max_wait_time == 0) { /* Issue Force Soft Reset */ ql_write_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus, ((ISP_CONTROL_FSR << 16) | ISP_CONTROL_FSR)); /* * Wait until the firmware tells us the Force Soft Reset is * done */ max_wait_time = 5; do { value = ql_read_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus); if ((value & ISP_CONTROL_FSR) == 0) break; mdelay(1000); } while ((--max_wait_time)); } if (max_wait_time == 0) status = 1; clear_bit(QL_RESET_ACTIVE, &qdev->flags); set_bit(QL_RESET_DONE, &qdev->flags); return status; } static void ql_set_mac_info(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value, port_status; u8 func_number; /* Get the function number */ value = ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); switch (value & ISP_CONTROL_FN_MASK) { case ISP_CONTROL_FN0_NET: qdev->mac_index = 0; qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; qdev->mb_bit_mask = FN0_MA_BITS_MASK; qdev->PHYAddr = PORT0_PHY_ADDRESS; if (port_status & PORT_STATUS_SM0) set_bit(QL_LINK_OPTICAL, &qdev->flags); else clear_bit(QL_LINK_OPTICAL, &qdev->flags); break; case ISP_CONTROL_FN1_NET: qdev->mac_index = 1; qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; qdev->mb_bit_mask = FN1_MA_BITS_MASK; qdev->PHYAddr = PORT1_PHY_ADDRESS; if (port_status & PORT_STATUS_SM1) set_bit(QL_LINK_OPTICAL, &qdev->flags); else clear_bit(QL_LINK_OPTICAL, &qdev->flags); break; case ISP_CONTROL_FN0_SCSI: case ISP_CONTROL_FN1_SCSI: default: netdev_printk(KERN_DEBUG, qdev->ndev, "Invalid function number, ispControlStatus = 0x%x\n", value); break; } qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; } static void ql_display_dev_info(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); struct pci_dev *pdev = qdev->pdev; netdev_info(ndev, "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", qdev->pci_slot); netdev_info(ndev, "%s Interface\n", test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); /* * Print PCI bus width/type. */ netdev_info(ndev, "Bus interface is %s %s\n", ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), ((qdev->pci_x) ? 
"PCI-X" : "PCI")); netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", qdev->mem_map_registers); netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); } static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) { struct net_device *ndev = qdev->ndev; int retval = 0; netif_stop_queue(ndev); netif_carrier_off(ndev); clear_bit(QL_ADAPTER_UP, &qdev->flags); clear_bit(QL_LINK_MASTER, &qdev->flags); ql_disable_interrupts(qdev); free_irq(qdev->pdev->irq, ndev); if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); clear_bit(QL_MSI_ENABLED, &qdev->flags); pci_disable_msi(qdev->pdev); } del_timer_sync(&qdev->adapter_timer); napi_disable(&qdev->napi); if (do_reset) { int soft_reset; unsigned long hw_flags; spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (ql_wait_for_drvr_lock(qdev)) { soft_reset = ql_adapter_reset(qdev); if (soft_reset) { netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", qdev->index); } netdev_err(ndev, "Releasing driver lock via chip reset\n"); } else { netdev_err(ndev, "Could not acquire driver lock to do reset!\n"); retval = -1; } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); } ql_free_mem_resources(qdev); return retval; } static int ql_adapter_up(struct ql3_adapter *qdev) { struct net_device *ndev = qdev->ndev; int err; unsigned long irq_flags = IRQF_SHARED; unsigned long hw_flags; if (ql_alloc_mem_resources(qdev)) { netdev_err(ndev, "Unable to allocate buffers\n"); return -ENOMEM; } if (qdev->msi) { if (pci_enable_msi(qdev->pdev)) { netdev_err(ndev, "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n"); qdev->msi = 0; } else { netdev_info(ndev, "MSI Enabled...\n"); set_bit(QL_MSI_ENABLED, &qdev->flags); irq_flags &= ~IRQF_SHARED; } } err = request_irq(qdev->pdev->irq, ql3xxx_isr, irq_flags, ndev->name, ndev); if (err) { netdev_err(ndev, "Failed to reserve interrupt %d - already in use\n", qdev->pdev->irq); goto err_irq; } spin_lock_irqsave(&qdev->hw_lock, hw_flags); if (!ql_wait_for_drvr_lock(qdev)) { netdev_err(ndev, "Could not acquire driver lock\n"); err = -ENODEV; goto err_lock; } err = ql_adapter_initialize(qdev); if (err) { netdev_err(ndev, "Unable to initialize adapter\n"); goto err_init; } ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); set_bit(QL_ADAPTER_UP, &qdev->flags); mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); napi_enable(&qdev->napi); ql_enable_interrupts(qdev); return 0; err_init: ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); err_lock: spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); free_irq(qdev->pdev->irq, ndev); err_irq: if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { netdev_info(ndev, "calling pci_disable_msi()\n"); clear_bit(QL_MSI_ENABLED, &qdev->flags); pci_disable_msi(qdev->pdev); } return err; } static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) { if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { netdev_err(qdev->ndev, "Driver up/down cycle failed, closing device\n"); rtnl_lock(); dev_close(qdev->ndev); rtnl_unlock(); return -1; } return 0; } static int ql3xxx_close(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); /* * Wait for device to recover from a reset. * (Rarely happens, but possible.) 
*/ while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) msleep(50); ql_adapter_down(qdev, QL_DO_RESET); return 0; } static int ql3xxx_open(struct net_device *ndev) { struct ql3_adapter *qdev = netdev_priv(ndev); return ql_adapter_up(qdev); } static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) { struct ql3_adapter *qdev = netdev_priv(ndev); struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; struct sockaddr *addr = p; unsigned long hw_flags; if (netif_running(ndev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(ndev, addr->sa_data); spin_lock_irqsave(&qdev->hw_lock, hw_flags); /* Program lower 32 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((ndev->dev_addr[2] << 24) | (ndev-> dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); /* Program top 16 bits of the MAC address */ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; } static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue) { struct ql3_adapter *qdev = netdev_priv(ndev); netdev_err(ndev, "Resetting...\n"); /* * Stop the queues, we've got a problem. */ netif_stop_queue(ndev); /* * Wake up the worker to process this event. */ queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); } static void ql_reset_work(struct work_struct *work) { struct ql3_adapter *qdev = container_of(work, struct ql3_adapter, reset_work.work); struct net_device *ndev = qdev->ndev; u32 value; struct ql_tx_buf_cb *tx_cb; int max_wait_time, i; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; unsigned long hw_flags; if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || test_bit(QL_RESET_START, &qdev->flags)) { clear_bit(QL_LINK_MASTER, &qdev->flags); /* * Loop through the active list and return the skb. */ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { int j; tx_cb = &qdev->tx_buf[i]; if (tx_cb->skb) { netdev_printk(KERN_DEBUG, ndev, "Freeing lost SKB\n"); dma_unmap_single(&qdev->pdev->dev, dma_unmap_addr(&tx_cb->map[0], mapaddr), dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE); for (j = 1; j < tx_cb->seg_count; j++) { dma_unmap_page(&qdev->pdev->dev, dma_unmap_addr(&tx_cb->map[j], mapaddr), dma_unmap_len(&tx_cb->map[j], maplen), DMA_TO_DEVICE); } dev_kfree_skb(tx_cb->skb); tx_cb->skb = NULL; } } netdev_err(ndev, "Clearing NRI after reset\n"); spin_lock_irqsave(&qdev->hw_lock, hw_flags); ql_write_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus, ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); /* * Wait the for Soft Reset to Complete. */ max_wait_time = 10; do { value = ql_read_common_reg(qdev, &port_regs->CommonRegs. ispControlStatus); if ((value & ISP_CONTROL_SR) == 0) { netdev_printk(KERN_DEBUG, ndev, "reset completed\n"); break; } if (value & ISP_CONTROL_RI) { netdev_printk(KERN_DEBUG, ndev, "clearing NRI after reset\n"); ql_write_common_reg(qdev, &port_regs-> CommonRegs. 
ispControlStatus, ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); } spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); ssleep(1); spin_lock_irqsave(&qdev->hw_lock, hw_flags); } while (--max_wait_time); spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); if (value & ISP_CONTROL_SR) { /* * Set the reset flags and clear the board again. * Nothing else to do... */ netdev_err(ndev, "Timed out waiting for reset to complete\n"); netdev_err(ndev, "Do a reset\n"); clear_bit(QL_RESET_PER_SCSI, &qdev->flags); clear_bit(QL_RESET_START, &qdev->flags); ql_cycle_adapter(qdev, QL_DO_RESET); return; } clear_bit(QL_RESET_ACTIVE, &qdev->flags); clear_bit(QL_RESET_PER_SCSI, &qdev->flags); clear_bit(QL_RESET_START, &qdev->flags); ql_cycle_adapter(qdev, QL_NO_RESET); } } static void ql_tx_timeout_work(struct work_struct *work) { struct ql3_adapter *qdev = container_of(work, struct ql3_adapter, tx_timeout_work.work); ql_cycle_adapter(qdev, QL_DO_RESET); } static void ql_get_board_info(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; u32 value; value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); if (value & PORT_STATUS_64) qdev->pci_width = 64; else qdev->pci_width = 32; if (value & PORT_STATUS_X) qdev->pci_x = 1; else qdev->pci_x = 0; qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); } static void ql3xxx_timer(struct timer_list *t) { struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer); queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); } static const struct net_device_ops ql3xxx_netdev_ops = { .ndo_open = ql3xxx_open, .ndo_start_xmit = ql3xxx_send, .ndo_stop = ql3xxx_close, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ql3xxx_set_mac_address, .ndo_tx_timeout = ql3xxx_tx_timeout, }; static int ql3xxx_probe(struct pci_dev *pdev, const struct pci_device_id *pci_entry) { struct net_device *ndev = NULL; struct ql3_adapter *qdev = NULL; static int cards_found; int err; err = pci_enable_device(pdev); if (err) { pr_err("%s cannot enable PCI device\n", pci_name(pdev)); goto err_out; } err = pci_request_regions(pdev, DRV_NAME); if (err) { pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); goto err_out_disable_pdev; } pci_set_master(pdev); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { pr_err("%s no usable DMA configuration\n", pci_name(pdev)); goto err_out_free_regions; } ndev = alloc_etherdev(sizeof(struct ql3_adapter)); if (!ndev) { err = -ENOMEM; goto err_out_free_regions; } SET_NETDEV_DEV(ndev, &pdev->dev); pci_set_drvdata(pdev, ndev); qdev = netdev_priv(ndev); qdev->index = cards_found; qdev->ndev = ndev; qdev->pdev = pdev; qdev->device_id = pci_entry->device; qdev->port_link_state = LS_DOWN; if (msi) qdev->msi = 1; qdev->msg_enable = netif_msg_init(debug, default_msg); ndev->features |= NETIF_F_HIGHDMA; if (qdev->device_id == QL3032_DEVICE_ID) ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); if (!qdev->mem_map_registers) { pr_err("%s: cannot map device registers\n", pci_name(pdev)); err = -EIO; goto err_out_free_ndev; } spin_lock_init(&qdev->adapter_lock); spin_lock_init(&qdev->hw_lock); /* Set driver entry points */ ndev->netdev_ops = &ql3xxx_netdev_ops; ndev->ethtool_ops = &ql3xxx_ethtool_ops; ndev->watchdog_timeo = 5 * HZ; netif_napi_add(ndev, &qdev->napi, ql_poll); ndev->irq = pdev->irq; /* make sure the EEPROM is good */ if (ql_get_nvram_params(qdev)) { pr_alert("%s: 
Adapter #%d, Invalid NVRAM parameters\n", __func__, qdev->index); err = -EIO; goto err_out_iounmap; } ql_set_mac_info(qdev); /* Validate and set parameters */ if (qdev->mac_index) { ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); } else { ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); } ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; /* Record PCI bus information. */ ql_get_board_info(qdev); /* * Set the Maximum Memory Read Byte Count value. We do this to handle * jumbo frames. */ if (qdev->pci_x) pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); err = register_netdev(ndev); if (err) { pr_err("%s: cannot register net device\n", pci_name(pdev)); goto err_out_iounmap; } /* we're going to reset, so assume we have no link for now */ netif_carrier_off(ndev); netif_stop_queue(ndev); qdev->workqueue = create_singlethread_workqueue(ndev->name); if (!qdev->workqueue) { unregister_netdev(ndev); err = -ENOMEM; goto err_out_iounmap; } INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0); qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ if (!cards_found) { pr_alert("%s\n", DRV_STRING); pr_alert("Driver name: %s, Version: %s\n", DRV_NAME, DRV_VERSION); } ql_display_dev_info(ndev); cards_found++; return 0; err_out_iounmap: iounmap(qdev->mem_map_registers); err_out_free_ndev: free_netdev(ndev); err_out_free_regions: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); err_out: return err; } static void ql3xxx_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql3_adapter *qdev = netdev_priv(ndev); unregister_netdev(ndev); ql_disable_interrupts(qdev); if (qdev->workqueue) { cancel_delayed_work(&qdev->reset_work); cancel_delayed_work(&qdev->tx_timeout_work); destroy_workqueue(qdev->workqueue); qdev->workqueue = NULL; } iounmap(qdev->mem_map_registers); pci_release_regions(pdev); free_netdev(ndev); } static struct pci_driver ql3xxx_driver = { .name = DRV_NAME, .id_table = ql3xxx_pci_tbl, .probe = ql3xxx_probe, .remove = ql3xxx_remove, }; module_pci_driver(ql3xxx_driver);
linux-master
drivers/net/ethernet/qlogic/qla3xxx.c
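The qla3xxx.c source above allocates one coherent DMA region for all small receive buffers and then fills the hardware buffer queue with address elements whose 64-bit bus address is split into 32-bit halves (MS_64BITS/LS_64BITS), each entry offset by QL_SMALL_BUFFER_SIZE from the base of the region. The sketch below is a minimal userspace analogue of that address-element fill, not the driver's code: the demo_* names and constants are hypothetical, a plain uint64_t stands in for dma_addr_t, the field order in the element struct is illustrative, and the cpu_to_le32() byte-swapping is omitted.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's constants and helpers. */
#define DEMO_SMALL_BUFFER_SIZE	32u
#define DEMO_ENTRIES_PER_BUFQ	4u
#define DEMO_NUM_BUFQ_ENTRIES	2u

#define DEMO_LS_64BITS(x)	((uint32_t)((x) & 0xffffffffu))		/* low 32 bits  */
#define DEMO_MS_64BITS(x)	((uint32_t)(((x) >> 32) & 0xffffffffu))	/* high 32 bits */

/* Mirrors the idea of bufq_addr_element: one {high, low} pair per buffer. */
struct demo_bufq_addr_element {
	uint32_t addr_high;
	uint32_t addr_low;
};

/*
 * Fill a buffer queue so that entry i points at (base + i * buffer_size),
 * the same layout ql_alloc_small_buffers() programs for the chip.
 */
static void demo_fill_bufq(struct demo_bufq_addr_element *q, unsigned int n,
			   uint64_t base, uint32_t buffer_size)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		uint64_t addr = base + (uint64_t)i * buffer_size;

		q[i].addr_high = DEMO_MS_64BITS(addr);
		q[i].addr_low  = DEMO_LS_64BITS(addr);
	}
}

int main(void)
{
	struct demo_bufq_addr_element q[DEMO_ENTRIES_PER_BUFQ * DEMO_NUM_BUFQ_ENTRIES];
	uint64_t fake_bus_addr = 0x00000001fffffff0ull;	/* made-up "DMA" address */
	unsigned int i;

	demo_fill_bufq(q, DEMO_ENTRIES_PER_BUFQ * DEMO_NUM_BUFQ_ENTRIES,
		       fake_bus_addr, DEMO_SMALL_BUFFER_SIZE);

	/* The second entry shows the carry from the low word into the high word. */
	for (i = 0; i < DEMO_ENTRIES_PER_BUFQ * DEMO_NUM_BUFQ_ENTRIES; i++)
		printf("entry %u: high=0x%08" PRIx32 " low=0x%08" PRIx32 "\n",
		       i, q[i].addr_high, q[i].addr_low);

	return 0;
}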
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/qed/qede_rdma.h> #include "qede.h" static struct qedr_driver *qedr_drv; static LIST_HEAD(qedr_dev_list); static DEFINE_MUTEX(qedr_dev_list_lock); bool qede_rdma_supported(struct qede_dev *dev) { return dev->dev_info.common.rdma_supported; } static void _qede_rdma_dev_add(struct qede_dev *edev) { if (!qedr_drv) return; /* Leftovers from previous error recovery */ edev->rdma_info.exp_recovery = false; edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev, edev->ndev); } static int qede_rdma_create_wq(struct qede_dev *edev) { INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list); kref_init(&edev->rdma_info.refcnt); init_completion(&edev->rdma_info.event_comp); edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq"); if (!edev->rdma_info.rdma_wq) { DP_NOTICE(edev, "qedr: Could not create workqueue\n"); return -ENOMEM; } return 0; } static void qede_rdma_cleanup_event(struct qede_dev *edev) { struct list_head *head = &edev->rdma_info.rdma_event_list; struct qede_rdma_event_work *event_node; flush_workqueue(edev->rdma_info.rdma_wq); while (!list_empty(head)) { event_node = list_entry(head->next, struct qede_rdma_event_work, list); cancel_work_sync(&event_node->work); list_del(&event_node->list); kfree(event_node); } } static void qede_rdma_complete_event(struct kref *ref) { struct qede_rdma_dev *rdma_dev = container_of(ref, struct qede_rdma_dev, refcnt); /* no more events will be added after this */ complete(&rdma_dev->event_comp); } static void qede_rdma_destroy_wq(struct qede_dev *edev) { /* Avoid race with add_event flow, make sure it finishes before * we start accessing the list and cleaning up the work */ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event); wait_for_completion(&edev->rdma_info.event_comp); qede_rdma_cleanup_event(edev); destroy_workqueue(edev->rdma_info.rdma_wq); edev->rdma_info.rdma_wq = NULL; } int qede_rdma_dev_add(struct qede_dev *edev, bool recovery) { int rc; if (!qede_rdma_supported(edev)) return 0; /* Cannot start qedr while recovering since it wasn't fully stopped */ if (recovery) return 0; rc = qede_rdma_create_wq(edev); if (rc) return rc; INIT_LIST_HEAD(&edev->rdma_info.entry); mutex_lock(&qedr_dev_list_lock); list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); _qede_rdma_dev_add(edev); mutex_unlock(&qedr_dev_list_lock); return rc; } static void _qede_rdma_dev_remove(struct qede_dev *edev) { if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev) qedr_drv->remove(edev->rdma_info.qedr_dev); } void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery) { if (!qede_rdma_supported(edev)) return; /* Cannot remove qedr while recovering since it wasn't fully stopped */ if (!recovery) { qede_rdma_destroy_wq(edev); mutex_lock(&qedr_dev_list_lock); if (!edev->rdma_info.exp_recovery) _qede_rdma_dev_remove(edev); edev->rdma_info.qedr_dev = NULL; list_del(&edev->rdma_info.entry); mutex_unlock(&qedr_dev_list_lock); } else { if (!edev->rdma_info.exp_recovery) { mutex_lock(&qedr_dev_list_lock); _qede_rdma_dev_remove(edev); mutex_unlock(&qedr_dev_list_lock); } edev->rdma_info.exp_recovery = true; } } static void _qede_rdma_dev_open(struct qede_dev *edev) { if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) 
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP); } static void qede_rdma_dev_open(struct qede_dev *edev) { if (!qede_rdma_supported(edev)) return; mutex_lock(&qedr_dev_list_lock); _qede_rdma_dev_open(edev); mutex_unlock(&qedr_dev_list_lock); } static void _qede_rdma_dev_close(struct qede_dev *edev) { if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN); } static void qede_rdma_dev_close(struct qede_dev *edev) { if (!qede_rdma_supported(edev)) return; mutex_lock(&qedr_dev_list_lock); _qede_rdma_dev_close(edev); mutex_unlock(&qedr_dev_list_lock); } static void qede_rdma_dev_shutdown(struct qede_dev *edev) { if (!qede_rdma_supported(edev)) return; mutex_lock(&qedr_dev_list_lock); if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE); mutex_unlock(&qedr_dev_list_lock); } int qede_rdma_register_driver(struct qedr_driver *drv) { struct qede_dev *edev; u8 qedr_counter = 0; mutex_lock(&qedr_dev_list_lock); if (qedr_drv) { mutex_unlock(&qedr_dev_list_lock); return -EINVAL; } qedr_drv = drv; list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { struct net_device *ndev; qedr_counter++; _qede_rdma_dev_add(edev); ndev = edev->ndev; if (netif_running(ndev) && netif_oper_up(ndev)) _qede_rdma_dev_open(edev); } mutex_unlock(&qedr_dev_list_lock); pr_notice("qedr: discovered and registered %d RDMA funcs\n", qedr_counter); return 0; } EXPORT_SYMBOL(qede_rdma_register_driver); void qede_rdma_unregister_driver(struct qedr_driver *drv) { struct qede_dev *edev; mutex_lock(&qedr_dev_list_lock); list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { /* If device has experienced recovery it was already removed */ if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery) _qede_rdma_dev_remove(edev); } qedr_drv = NULL; mutex_unlock(&qedr_dev_list_lock); } EXPORT_SYMBOL(qede_rdma_unregister_driver); static void qede_rdma_changeaddr(struct qede_dev *edev) { if (!qede_rdma_supported(edev)) return; if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR); } static void qede_rdma_change_mtu(struct qede_dev *edev) { if (qede_rdma_supported(edev)) { if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_MTU); } } static struct qede_rdma_event_work * qede_rdma_get_free_event_node(struct qede_dev *edev) { struct qede_rdma_event_work *event_node = NULL; bool found = false; list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list, list) { if (!work_pending(&event_node->work)) { found = true; break; } } if (!found) { event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC); if (!event_node) { DP_NOTICE(edev, "qedr: Could not allocate memory for rdma work\n"); return NULL; } list_add_tail(&event_node->list, &edev->rdma_info.rdma_event_list); } return event_node; } static void qede_rdma_handle_event(struct work_struct *work) { struct qede_rdma_event_work *event_node; enum qede_rdma_event event; struct qede_dev *edev; event_node = container_of(work, struct qede_rdma_event_work, work); event = event_node->event; edev = event_node->ptr; switch (event) { case QEDE_UP: qede_rdma_dev_open(edev); break; case QEDE_DOWN: qede_rdma_dev_close(edev); break; case QEDE_CLOSE: qede_rdma_dev_shutdown(edev); break; case QEDE_CHANGE_ADDR: qede_rdma_changeaddr(edev); break; case QEDE_CHANGE_MTU: qede_rdma_change_mtu(edev); break; default: DP_NOTICE(edev, "Invalid rdma event 
%d", event); } } static void qede_rdma_add_event(struct qede_dev *edev, enum qede_rdma_event event) { struct qede_rdma_event_work *event_node; /* If a recovery was experienced avoid adding the event */ if (edev->rdma_info.exp_recovery) return; if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq) return; /* We don't want the cleanup flow to start while we're allocating and * scheduling the work */ if (!kref_get_unless_zero(&edev->rdma_info.refcnt)) return; /* already being destroyed */ event_node = qede_rdma_get_free_event_node(edev); if (!event_node) goto out; event_node->event = event; event_node->ptr = edev; INIT_WORK(&event_node->work, qede_rdma_handle_event); queue_work(edev->rdma_info.rdma_wq, &event_node->work); out: kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event); } void qede_rdma_dev_event_open(struct qede_dev *edev) { qede_rdma_add_event(edev, QEDE_UP); } void qede_rdma_dev_event_close(struct qede_dev *edev) { qede_rdma_add_event(edev, QEDE_DOWN); } void qede_rdma_event_changeaddr(struct qede_dev *edev) { qede_rdma_add_event(edev, QEDE_CHANGE_ADDR); } void qede_rdma_event_change_mtu(struct qede_dev *edev) { qede_rdma_add_event(edev, QEDE_CHANGE_MTU); }
linux-master
drivers/net/ethernet/qlogic/qede/qede_rdma.c
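The qede_rdma.c file above guards its event workqueue with a kref plus a completion: qede_rdma_add_event() takes a reference with kref_get_unless_zero() before allocating and queueing a work item and bails out if teardown already dropped the initial reference, while qede_rdma_destroy_wq() drops that initial reference and waits on the completion so no event can be queued once cleanup starts. The sketch below is a compact userspace analogue of that "get-unless-zero" gate built on a C11 atomic counter; the demo_* names are hypothetical, the completion/wait side is reduced to a flag check, and the real kref/completion kernel APIs are only being imitated, not used.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kref guarding rdma_info in qede_rdma.c. */
struct demo_event_gate {
	atomic_int refcnt;	/* starts at 1: the "initial" reference      */
	atomic_bool done;	/* stands in for the completion being fired  */
};

static void demo_gate_init(struct demo_event_gate *g)
{
	atomic_init(&g->refcnt, 1);
	atomic_init(&g->done, false);
}

/* Analogue of kref_get_unless_zero(): fail once teardown has dropped the
 * initial reference, so no new event can slip in during cleanup. */
static bool demo_gate_get_unless_zero(struct demo_event_gate *g)
{
	int old = atomic_load(&g->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&g->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

/* Analogue of kref_put(): the last put "completes" the teardown. */
static void demo_gate_put(struct demo_event_gate *g)
{
	if (atomic_fetch_sub(&g->refcnt, 1) == 1)
		atomic_store(&g->done, true);
}

/* Producer side: mirrors qede_rdma_add_event() wrapping the gate around
 * allocating and queueing one work item. */
static void demo_add_event(struct demo_event_gate *g, const char *name)
{
	if (!demo_gate_get_unless_zero(g)) {
		printf("%s: dropped, teardown already started\n", name);
		return;
	}
	printf("%s: queued\n", name);	/* queue_work() would go here */
	demo_gate_put(g);
}

int main(void)
{
	struct demo_event_gate gate;

	demo_gate_init(&gate);
	demo_add_event(&gate, "QEDE_UP");	/* accepted */

	/* Teardown: drop the initial reference, then wait until it completes. */
	demo_gate_put(&gate);
	while (!atomic_load(&gate.done))
		;	/* wait_for_completion() stands in for this check */

	demo_add_event(&gate, "QEDE_DOWN");	/* rejected: gate already closed */
	return 0;
}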
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/capability.h> #include <linux/vmalloc.h> #include <linux/phylink.h> #include "qede.h" #include "qede_ptp.h" #define QEDE_RQSTAT_OFFSET(stat_name) \ (offsetof(struct qede_rx_queue, stat_name)) #define QEDE_RQSTAT_STRING(stat_name) (#stat_name) #define QEDE_RQSTAT(stat_name) \ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)} #define QEDE_SELFTEST_POLL_COUNT 100 #define QEDE_DUMP_VERSION 0x1 #define QEDE_DUMP_NVM_ARG_COUNT 2 static const struct { u64 offset; char string[ETH_GSTRING_LEN]; } qede_rqstats_arr[] = { QEDE_RQSTAT(rcv_pkts), QEDE_RQSTAT(rx_hw_errors), QEDE_RQSTAT(rx_alloc_errors), QEDE_RQSTAT(rx_ip_frags), QEDE_RQSTAT(xdp_no_pass), }; #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr) #define QEDE_TQSTAT_OFFSET(stat_name) \ (offsetof(struct qede_tx_queue, stat_name)) #define QEDE_TQSTAT_STRING(stat_name) (#stat_name) #define QEDE_TQSTAT(stat_name) \ {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)} #define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr) static const struct { u64 offset; char string[ETH_GSTRING_LEN]; } qede_tqstats_arr[] = { QEDE_TQSTAT(xmit_pkts), QEDE_TQSTAT(stopped_cnt), QEDE_TQSTAT(tx_mem_alloc_err), }; #define QEDE_STAT_OFFSET(stat_name, type, base) \ (offsetof(type, stat_name) + (base)) #define QEDE_STAT_STRING(stat_name) (#stat_name) #define _QEDE_STAT(stat_name, type, base, attr) \ {QEDE_STAT_OFFSET(stat_name, type, base), \ QEDE_STAT_STRING(stat_name), \ attr} #define QEDE_STAT(stat_name) \ _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0) #define QEDE_PF_STAT(stat_name) \ _QEDE_STAT(stat_name, struct qede_stats_common, 0, \ BIT(QEDE_STAT_PF_ONLY)) #define QEDE_PF_BB_STAT(stat_name) \ _QEDE_STAT(stat_name, struct qede_stats_bb, \ offsetof(struct qede_stats, bb), \ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY)) #define QEDE_PF_AH_STAT(stat_name) \ _QEDE_STAT(stat_name, struct qede_stats_ah, \ offsetof(struct qede_stats, ah), \ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY)) static const struct { u64 offset; char string[ETH_GSTRING_LEN]; unsigned long attr; #define QEDE_STAT_PF_ONLY 0 #define QEDE_STAT_BB_ONLY 1 #define QEDE_STAT_AH_ONLY 2 } qede_stats_arr[] = { QEDE_STAT(rx_ucast_bytes), QEDE_STAT(rx_mcast_bytes), QEDE_STAT(rx_bcast_bytes), QEDE_STAT(rx_ucast_pkts), QEDE_STAT(rx_mcast_pkts), QEDE_STAT(rx_bcast_pkts), QEDE_STAT(tx_ucast_bytes), QEDE_STAT(tx_mcast_bytes), QEDE_STAT(tx_bcast_bytes), QEDE_STAT(tx_ucast_pkts), QEDE_STAT(tx_mcast_pkts), QEDE_STAT(tx_bcast_pkts), QEDE_PF_STAT(rx_64_byte_packets), QEDE_PF_STAT(rx_65_to_127_byte_packets), QEDE_PF_STAT(rx_128_to_255_byte_packets), QEDE_PF_STAT(rx_256_to_511_byte_packets), QEDE_PF_STAT(rx_512_to_1023_byte_packets), QEDE_PF_STAT(rx_1024_to_1518_byte_packets), QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets), QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets), QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets), QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets), QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets), QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets), QEDE_PF_STAT(tx_64_byte_packets), QEDE_PF_STAT(tx_65_to_127_byte_packets), QEDE_PF_STAT(tx_128_to_255_byte_packets), QEDE_PF_STAT(tx_256_to_511_byte_packets), 
QEDE_PF_STAT(tx_512_to_1023_byte_packets), QEDE_PF_STAT(tx_1024_to_1518_byte_packets), QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets), QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets), QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets), QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets), QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets), QEDE_PF_STAT(rx_mac_crtl_frames), QEDE_PF_STAT(tx_mac_ctrl_frames), QEDE_PF_STAT(rx_pause_frames), QEDE_PF_STAT(tx_pause_frames), QEDE_PF_STAT(rx_pfc_frames), QEDE_PF_STAT(tx_pfc_frames), QEDE_PF_STAT(rx_crc_errors), QEDE_PF_STAT(rx_align_errors), QEDE_PF_STAT(rx_carrier_errors), QEDE_PF_STAT(rx_oversize_packets), QEDE_PF_STAT(rx_jabbers), QEDE_PF_STAT(rx_undersize_packets), QEDE_PF_STAT(rx_fragments), QEDE_PF_BB_STAT(tx_lpi_entry_count), QEDE_PF_BB_STAT(tx_total_collisions), QEDE_PF_STAT(brb_truncates), QEDE_PF_STAT(brb_discards), QEDE_STAT(no_buff_discards), QEDE_PF_STAT(mftag_filter_discards), QEDE_PF_STAT(mac_filter_discards), QEDE_PF_STAT(gft_filter_drop), QEDE_STAT(tx_err_drop_pkts), QEDE_STAT(ttl0_discard), QEDE_STAT(packet_too_big_discard), QEDE_STAT(coalesced_pkts), QEDE_STAT(coalesced_events), QEDE_STAT(coalesced_aborts_num), QEDE_STAT(non_coalesced_pkts), QEDE_STAT(coalesced_bytes), QEDE_STAT(link_change_count), QEDE_STAT(ptp_skip_txts), }; #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) #define QEDE_STAT_IS_PF_ONLY(i) \ test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr) #define QEDE_STAT_IS_BB_ONLY(i) \ test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr) #define QEDE_STAT_IS_AH_ONLY(i) \ test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr) enum { QEDE_PRI_FLAG_CMT, QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */ QEDE_PRI_FLAG_RECOVER_ON_ERROR, QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */ QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */ QEDE_PRI_FLAG_LEN, }; static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { "Coupled-Function", "SmartAN capable", "Recover on error", "ESL capable", "ESL active", }; enum qede_ethtool_tests { QEDE_ETHTOOL_INT_LOOPBACK, QEDE_ETHTOOL_INTERRUPT_TEST, QEDE_ETHTOOL_MEMORY_TEST, QEDE_ETHTOOL_REGISTER_TEST, QEDE_ETHTOOL_CLOCK_TEST, QEDE_ETHTOOL_NVRAM_TEST, QEDE_ETHTOOL_TEST_MAX }; static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = { "Internal loopback (offline)", "Interrupt (online)\t", "Memory (online)\t\t", "Register (online)\t", "Clock (online)\t\t", "Nvram (online)\t\t", }; /* Forced speed capabilities maps */ struct qede_forced_speed_map { u32 speed; __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); const u32 *cap_arr; u32 arr_size; }; #define QEDE_FORCED_SPEED_MAP(value) \ { \ .speed = SPEED_##value, \ .cap_arr = qede_forced_speed_##value, \ .arr_size = ARRAY_SIZE(qede_forced_speed_##value), \ } static const u32 qede_forced_speed_1000[] __initconst = { ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT, }; static const u32 qede_forced_speed_10000[] __initconst = { ETHTOOL_LINK_MODE_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, }; static const u32 qede_forced_speed_20000[] __initconst = { ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, }; static const u32 qede_forced_speed_25000[] __initconst = { 
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, }; static const u32 qede_forced_speed_40000[] __initconst = { ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, }; static const u32 qede_forced_speed_50000[] __initconst = { ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, }; static const u32 qede_forced_speed_100000[] __initconst = { ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, }; static struct qede_forced_speed_map qede_forced_speed_maps[] __ro_after_init = { QEDE_FORCED_SPEED_MAP(1000), QEDE_FORCED_SPEED_MAP(10000), QEDE_FORCED_SPEED_MAP(20000), QEDE_FORCED_SPEED_MAP(25000), QEDE_FORCED_SPEED_MAP(40000), QEDE_FORCED_SPEED_MAP(50000), QEDE_FORCED_SPEED_MAP(100000), }; void __init qede_forced_speed_maps_init(void) { struct qede_forced_speed_map *map; u32 i; for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) { map = qede_forced_speed_maps + i; linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); map->cap_arr = NULL; map->arr_size = 0; } } /* Ethtool callbacks */ static void qede_get_strings_stats_txq(struct qede_dev *edev, struct qede_tx_queue *txq, u8 **buf) { int i; for (i = 0; i < QEDE_NUM_TQSTATS; i++) { if (txq->is_xdp) sprintf(*buf, "%d [XDP]: %s", QEDE_TXQ_XDP_TO_IDX(edev, txq), qede_tqstats_arr[i].string); else sprintf(*buf, "%d_%d: %s", txq->index, txq->cos, qede_tqstats_arr[i].string); *buf += ETH_GSTRING_LEN; } } static void qede_get_strings_stats_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq, u8 **buf) { int i; for (i = 0; i < QEDE_NUM_RQSTATS; i++) { sprintf(*buf, "%d: %s", rxq->rxq_id, qede_rqstats_arr[i].string); *buf += ETH_GSTRING_LEN; } } static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index) { return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) || (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) || (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index)); } static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) { struct qede_fastpath *fp; int i; /* Account for queue statistics */ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { fp = &edev->fp_array[i]; if (fp->type & QEDE_FASTPATH_RX) qede_get_strings_stats_rxq(edev, fp->rxq, &buf); if (fp->type & QEDE_FASTPATH_XDP) qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf); if (fp->type & QEDE_FASTPATH_TX) { int cos; for_each_cos_in_txq(edev, cos) qede_get_strings_stats_txq(edev, &fp->txq[cos], &buf); } } /* Account for non-queue statistics */ for (i = 0; i < QEDE_NUM_STATS; i++) { if (qede_is_irrelevant_stat(edev, i)) continue; strcpy(buf, qede_stats_arr[i].string); buf += ETH_GSTRING_LEN; } } static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct qede_dev *edev = netdev_priv(dev); switch (stringset) { case ETH_SS_STATS: qede_get_strings_stats(edev, buf); break; case ETH_SS_PRIV_FLAGS: memcpy(buf, qede_private_arr, ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN); break; case ETH_SS_TEST: memcpy(buf, qede_tests_str_arr, ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX); break; default: DP_VERBOSE(edev, QED_MSG_DEBUG, "Unsupported stringset 0x%08x\n", stringset); } } static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf) { int i; for (i = 0; i < 
QEDE_NUM_TQSTATS; i++) { **buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset)); (*buf)++; } } static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf) { int i; for (i = 0; i < QEDE_NUM_RQSTATS; i++) { **buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset)); (*buf)++; } } static void qede_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *buf) { struct qede_dev *edev = netdev_priv(dev); struct qede_fastpath *fp; int i; qede_fill_by_demand_stats(edev); /* Need to protect the access to the fastpath array */ __qede_lock(edev); for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { fp = &edev->fp_array[i]; if (fp->type & QEDE_FASTPATH_RX) qede_get_ethtool_stats_rxq(fp->rxq, &buf); if (fp->type & QEDE_FASTPATH_XDP) qede_get_ethtool_stats_txq(fp->xdp_tx, &buf); if (fp->type & QEDE_FASTPATH_TX) { int cos; for_each_cos_in_txq(edev, cos) qede_get_ethtool_stats_txq(&fp->txq[cos], &buf); } } spin_lock(&edev->stats_lock); for (i = 0; i < QEDE_NUM_STATS; i++) { if (qede_is_irrelevant_stat(edev, i)) continue; *buf = *((u64 *)(((void *)&edev->stats) + qede_stats_arr[i].offset)); buf++; } spin_unlock(&edev->stats_lock); __qede_unlock(edev); } static int qede_get_sset_count(struct net_device *dev, int stringset) { struct qede_dev *edev = netdev_priv(dev); int num_stats = QEDE_NUM_STATS, i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < QEDE_NUM_STATS; i++) if (qede_is_irrelevant_stat(edev, i)) num_stats--; /* Account for the Regular Tx statistics */ num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS * edev->dev_info.num_tc; /* Account for the Regular Rx statistics */ num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS; /* Account for XDP statistics [if needed] */ if (edev->xdp_prog) num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS; return num_stats; case ETH_SS_PRIV_FLAGS: return QEDE_PRI_FLAG_LEN; case ETH_SS_TEST: if (!IS_VF(edev)) return QEDE_ETHTOOL_TEST_MAX; else return 0; default: DP_VERBOSE(edev, QED_MSG_DEBUG, "Unsupported stringset 0x%08x\n", stringset); return -EINVAL; } } static u32 qede_get_priv_flags(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); bool esl_active; u32 flags = 0; if (edev->dev_info.common.num_hwfns > 1) flags |= BIT(QEDE_PRI_FLAG_CMT); if (edev->dev_info.common.smart_an) flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT); if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE)) flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR); if (edev->dev_info.common.esl) flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT); edev->ops->common->get_esl_status(edev->cdev, &esl_active); if (esl_active) flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE); return flags; } static int qede_set_priv_flags(struct net_device *dev, u32 flags) { struct qede_dev *edev = netdev_priv(dev); u32 cflags = qede_get_priv_flags(dev); u32 dflags = flags ^ cflags; /* can only change RECOVER_ON_ERROR flag */ if (dflags & ~BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR)) return -EINVAL; if (flags & BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR)) set_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags); else clear_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags); return 0; } static int qede_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { typeof(cmd->link_modes) *link_modes = &cmd->link_modes; struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; __qede_lock(edev); memset(&current_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, &current_link); 
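/* Copy the capability masks reported by qed into the ethtool link_modes
 * fields below; speed and duplex are only meaningful while the interface
 * is open and the link is up, otherwise they are reported as unknown.
 */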
linkmode_copy(link_modes->supported, current_link.supported_caps); linkmode_copy(link_modes->advertising, current_link.advertised_caps); linkmode_copy(link_modes->lp_advertising, current_link.lp_caps); if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) { base->speed = current_link.speed; base->duplex = current_link.duplex; } else { base->speed = SPEED_UNKNOWN; base->duplex = DUPLEX_UNKNOWN; } __qede_unlock(edev); base->port = current_link.port; base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; } static int qede_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { const struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); const struct qede_forced_speed_map *map; struct qed_link_output current_link; struct qed_link_params params; u32 i; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } memset(&current_link, 0, sizeof(current_link)); memset(&params, 0, sizeof(params)); edev->ops->common->get_link(edev->cdev, &current_link); params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS; params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG; if (base->autoneg == AUTONEG_ENABLE) { if (!phylink_test(current_link.supported_caps, Autoneg)) { DP_INFO(edev, "Auto negotiation is not supported\n"); return -EOPNOTSUPP; } params.autoneg = true; params.forced_speed = 0; linkmode_copy(params.adv_speeds, cmd->link_modes.advertising); } else { /* forced speed */ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED; params.autoneg = false; params.forced_speed = base->speed; for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) { map = qede_forced_speed_maps + i; if (base->speed != map->speed || !linkmode_intersects(current_link.supported_caps, map->caps)) continue; linkmode_and(params.adv_speeds, current_link.supported_caps, map->caps); goto set_link; } DP_INFO(edev, "Unsupported speed %u\n", base->speed); return -EINVAL; } set_link: params.link_up = true; edev->ops->common->set_link(edev->cdev, &params); return 0; } static void qede_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN]; struct qede_dev *edev = netdev_priv(ndev); char mbi[ETHTOOL_FWVERS_LEN]; strscpy(info->driver, "qede", sizeof(info->driver)); snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", edev->dev_info.common.fw_major, edev->dev_info.common.fw_minor, edev->dev_info.common.fw_rev, edev->dev_info.common.fw_eng); snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d", (edev->dev_info.common.mfw_rev >> 24) & 0xFF, (edev->dev_info.common.mfw_rev >> 16) & 0xFF, (edev->dev_info.common.mfw_rev >> 8) & 0xFF, edev->dev_info.common.mfw_rev & 0xFF); if ((strlen(storm) + strlen("[storm]")) < sizeof(info->version)) snprintf(info->version, sizeof(info->version), "[storm %s]", storm); else snprintf(info->version, sizeof(info->version), "%s", storm); if (edev->dev_info.common.mbi_version) { snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d", (edev->dev_info.common.mbi_version & QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET, (edev->dev_info.common.mbi_version & QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET, (edev->dev_info.common.mbi_version & QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET); snprintf(info->fw_version, sizeof(info->fw_version), "mbi %s [mfw %s]", mbi, mfw); } else { snprintf(info->fw_version, sizeof(info->fw_version), "mfw %s", 
mfw); } strscpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info)); } static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct qede_dev *edev = netdev_priv(ndev); if (edev->dev_info.common.wol_support) { wol->supported = WAKE_MAGIC; wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0; } } static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct qede_dev *edev = netdev_priv(ndev); bool wol_requested; int rc; if (wol->wolopts & ~WAKE_MAGIC) { DP_INFO(edev, "Can't support WoL options other than magic-packet\n"); return -EINVAL; } wol_requested = !!(wol->wolopts & WAKE_MAGIC); if (wol_requested == edev->wol_enabled) return 0; /* Need to actually change configuration */ if (!edev->dev_info.common.wol_support) { DP_INFO(edev, "Device doesn't support WoL\n"); return -EINVAL; } rc = edev->ops->common->update_wol(edev->cdev, wol_requested); if (!rc) edev->wol_enabled = wol_requested; return rc; } static u32 qede_get_msglevel(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module; } static void qede_set_msglevel(struct net_device *ndev, u32 level) { struct qede_dev *edev = netdev_priv(ndev); u32 dp_module = 0; u8 dp_level = 0; qede_config_debug(level, &dp_module, &dp_level); edev->dp_level = dp_level; edev->dp_module = dp_module; edev->ops->common->update_msglvl(edev->cdev, dp_module, dp_level); } static int qede_nway_reset(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; struct qed_link_params link_params; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } if (!netif_running(dev)) return 0; memset(&current_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, &current_link); if (!current_link.link_up) return 0; /* Toggle the link */ memset(&link_params, 0, sizeof(link_params)); link_params.link_up = false; edev->ops->common->set_link(edev->cdev, &link_params); link_params.link_up = true; edev->ops->common->set_link(edev->cdev, &link_params); return 0; } static u32 qede_get_link(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; memset(&current_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, &current_link); return current_link.link_up; } static int qede_flash_device(struct net_device *dev, struct ethtool_flash *flash) { struct qede_dev *edev = netdev_priv(dev); return edev->ops->common->nvm_flash(edev->cdev, flash->data); } static int qede_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { void *rx_handle = NULL, *tx_handle = NULL; struct qede_dev *edev = netdev_priv(dev); u16 rx_coal, tx_coal, i, rc = 0; struct qede_fastpath *fp; rx_coal = QED_DEFAULT_RX_USECS; tx_coal = QED_DEFAULT_TX_USECS; memset(coal, 0, sizeof(struct ethtool_coalesce)); __qede_lock(edev); if (edev->state == QEDE_STATE_OPEN) { for_each_queue(i) { fp = &edev->fp_array[i]; if (fp->type & QEDE_FASTPATH_RX) { rx_handle = fp->rxq->handle; break; } } rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle); if (rc) { DP_INFO(edev, "Read Rx coalesce error\n"); goto out; } for_each_queue(i) { struct qede_tx_queue *txq; fp = &edev->fp_array[i]; /* All TX queues of given fastpath uses same * coalescing value, so no need to iterate over 
* all TCs, TC0 txq should suffice. */ if (fp->type & QEDE_FASTPATH_TX) { txq = QEDE_FP_TC0_TXQ(fp); tx_handle = txq->handle; break; } } rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle); if (rc) DP_INFO(edev, "Read Tx coalesce error\n"); } out: __qede_unlock(edev); coal->rx_coalesce_usecs = rx_coal; coal->tx_coalesce_usecs = tx_coal; coal->stats_block_coalesce_usecs = edev->stats_coal_usecs; return rc; } int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct qede_dev *edev = netdev_priv(dev); struct qede_fastpath *fp; int i, rc = 0; u16 rxc, txc; if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) { edev->stats_coal_usecs = coal->stats_block_coalesce_usecs; if (edev->stats_coal_usecs) { edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs); schedule_delayed_work(&edev->periodic_task, 0); DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n", edev->stats_coal_ticks); } else { cancel_delayed_work_sync(&edev->periodic_task); } } if (!netif_running(dev)) { DP_INFO(edev, "Interface is down\n"); return -EINVAL; } if (coal->rx_coalesce_usecs > QED_COALESCE_MAX || coal->tx_coalesce_usecs > QED_COALESCE_MAX) { DP_INFO(edev, "Can't support requested %s coalesce value [max supported value %d]\n", coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" : "tx", QED_COALESCE_MAX); return -EINVAL; } rxc = (u16)coal->rx_coalesce_usecs; txc = (u16)coal->tx_coalesce_usecs; for_each_queue(i) { fp = &edev->fp_array[i]; if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { rc = edev->ops->common->set_coalesce(edev->cdev, rxc, 0, fp->rxq->handle); if (rc) { DP_INFO(edev, "Set RX coalesce error, rc = %d\n", rc); return rc; } edev->coal_entry[i].rxc = rxc; edev->coal_entry[i].isvalid = true; } if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { struct qede_tx_queue *txq; /* All TX queues of given fastpath uses same * coalescing value, so no need to iterate over * all TCs, TC0 txq should suffice. 
*/ txq = QEDE_FP_TC0_TXQ(fp); rc = edev->ops->common->set_coalesce(edev->cdev, 0, txc, txq->handle); if (rc) { DP_INFO(edev, "Set TX coalesce error, rc = %d\n", rc); return rc; } edev->coal_entry[i].txc = txc; edev->coal_entry[i].isvalid = true; } } return rc; } static void qede_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { struct qede_dev *edev = netdev_priv(dev); ering->rx_max_pending = NUM_RX_BDS_MAX; ering->rx_pending = edev->q_num_rx_buffers; ering->tx_max_pending = NUM_TX_BDS_MAX; ering->tx_pending = edev->q_num_tx_buffers; } static int qede_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering, struct kernel_ethtool_ringparam *kernel_ering, struct netlink_ext_ack *extack) { struct qede_dev *edev = netdev_priv(dev); DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n", ering->rx_pending, ering->tx_pending); /* Validate legality of configuration */ if (ering->rx_pending > NUM_RX_BDS_MAX || ering->rx_pending < NUM_RX_BDS_MIN || ering->tx_pending > NUM_TX_BDS_MAX || ering->tx_pending < NUM_TX_BDS_MIN) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Can only support Rx Buffer size [0%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n", NUM_RX_BDS_MIN, NUM_RX_BDS_MAX, NUM_TX_BDS_MIN, NUM_TX_BDS_MAX); return -EINVAL; } /* Change ring size and re-load */ edev->q_num_rx_buffers = ering->rx_pending; edev->q_num_tx_buffers = ering->tx_pending; qede_reload(edev, NULL, false); return 0; } static void qede_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; memset(&current_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, &current_link); if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) epause->autoneg = true; if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) epause->rx_pause = true; if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) epause->tx_pause = true; DP_VERBOSE(edev, QED_MSG_DEBUG, "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n", epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); } static int qede_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct qede_dev *edev = netdev_priv(dev); struct qed_link_params params; struct qed_link_output current_link; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, "Pause settings are not allowed to be changed\n"); return -EOPNOTSUPP; } memset(&current_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, &current_link); memset(&params, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; if (epause->autoneg) { if (!phylink_test(current_link.supported_caps, Autoneg)) { DP_INFO(edev, "autoneg not supported\n"); return -EINVAL; } params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; } if (epause->rx_pause) params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; if (epause->tx_pause) params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; params.link_up = true; edev->ops->common->set_link(edev->cdev, &params); return 0; } static void qede_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *buffer) { struct qede_dev *edev = netdev_priv(ndev); regs->version = 0; memset(buffer, 0, regs->len); if (edev->ops && edev->ops->common) 
edev->ops->common->dbg_all_data(edev->cdev, buffer); } static int qede_get_regs_len(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); if (edev->ops && edev->ops->common) return edev->ops->common->dbg_all_data_size(edev->cdev); else return -EINVAL; } static void qede_update_mtu(struct qede_dev *edev, struct qede_reload_args *args) { edev->ndev->mtu = args->u.mtu; } /* Netdevice NDOs */ int qede_change_mtu(struct net_device *ndev, int new_mtu) { struct qede_dev *edev = netdev_priv(ndev); struct qede_reload_args args; DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Configuring MTU size of %d\n", new_mtu); if (new_mtu > PAGE_SIZE) ndev->features &= ~NETIF_F_GRO_HW; /* Set the mtu field and re-start the interface if needed */ args.u.mtu = new_mtu; args.func = &qede_update_mtu; qede_reload(edev, &args, false); #if IS_ENABLED(CONFIG_QED_RDMA) qede_rdma_event_change_mtu(edev); #endif edev->ops->common->update_mtu(edev->cdev, new_mtu); return 0; } static void qede_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct qede_dev *edev = netdev_priv(dev); channels->max_combined = QEDE_MAX_RSS_CNT(edev); channels->max_rx = QEDE_MAX_RSS_CNT(edev); channels->max_tx = QEDE_MAX_RSS_CNT(edev); channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - edev->fp_num_rx; channels->tx_count = edev->fp_num_tx; channels->rx_count = edev->fp_num_rx; } static int qede_set_channels(struct net_device *dev, struct ethtool_channels *channels) { struct qede_dev *edev = netdev_priv(dev); u32 count; DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", channels->rx_count, channels->tx_count, channels->other_count, channels->combined_count); count = channels->rx_count + channels->tx_count + channels->combined_count; /* We don't support `other' channels */ if (channels->other_count) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "command parameters not supported\n"); return -EINVAL; } if (!(channels->combined_count || (channels->rx_count && channels->tx_count))) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "need to request at least one transmit and one receive channel\n"); return -EINVAL; } if (count > QEDE_MAX_RSS_CNT(edev)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "requested channels = %d max supported channels = %d\n", count, QEDE_MAX_RSS_CNT(edev)); return -EINVAL; } /* Check if there was a change in the active parameters */ if ((count == QEDE_QUEUE_CNT(edev)) && (channels->tx_count == edev->fp_num_tx) && (channels->rx_count == edev->fp_num_rx)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "No change in active parameters\n"); return 0; } /* We need the number of queues to be divisible between the hwfns */ if ((count % edev->dev_info.common.num_hwfns) || (channels->tx_count % edev->dev_info.common.num_hwfns) || (channels->rx_count % edev->dev_info.common.num_hwfns)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Number of channels must be divisible by %04x\n", edev->dev_info.common.num_hwfns); return -EINVAL; } /* Set number of queues and reload if necessary */ edev->req_queues = count; edev->req_num_tx = channels->tx_count; edev->req_num_rx = channels->rx_count; /* Reset the indirection table if rx queue count is updated */ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) { edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED; memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table)); } qede_reload(edev, 
NULL, false); return 0; } static int qede_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { struct qede_dev *edev = netdev_priv(dev); return qede_ptp_get_ts_info(edev, info); } static int qede_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { struct qede_dev *edev = netdev_priv(dev); u8 led_state = 0; switch (state) { case ETHTOOL_ID_ACTIVE: return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: led_state = QED_LED_MODE_ON; break; case ETHTOOL_ID_OFF: led_state = QED_LED_MODE_OFF; break; case ETHTOOL_ID_INACTIVE: led_state = QED_LED_MODE_RESTORE; break; } edev->ops->common->set_led(edev->cdev, led_state); return 0; } static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) { info->data = RXH_IP_SRC | RXH_IP_DST; switch (info->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: if (edev->rss_caps & QED_RSS_IPV4_UDP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: if (edev->rss_caps & QED_RSS_IPV6_UDP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case IPV4_FLOW: case IPV6_FLOW: break; default: info->data = 0; break; } return 0; } static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct qede_dev *edev = netdev_priv(dev); int rc = 0; switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = QEDE_RSS_COUNT(edev); break; case ETHTOOL_GRXFH: rc = qede_get_rss_flags(edev, info); break; case ETHTOOL_GRXCLSRLCNT: info->rule_cnt = qede_get_arfs_filter_count(edev); info->data = QEDE_RFS_MAX_FLTR; break; case ETHTOOL_GRXCLSRULE: rc = qede_get_cls_rule_entry(edev, info); break; case ETHTOOL_GRXCLSRLALL: rc = qede_get_cls_rule_all(edev, info, rule_locs); break; default: DP_ERR(edev, "Command parameters not supported\n"); rc = -EOPNOTSUPP; } return rc; } static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) { struct qed_update_vport_params *vport_update_params; u8 set_caps = 0, clr_caps = 0; int rc = 0; DP_VERBOSE(edev, QED_MSG_DEBUG, "Set rss flags command parameters: flow type = %d, data = %llu\n", info->flow_type, info->data); switch (info->flow_type) { case TCP_V4_FLOW: case TCP_V6_FLOW: /* For TCP only 4-tuple hash is supported */ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) { DP_INFO(edev, "Command parameters not supported\n"); return -EINVAL; } return 0; case UDP_V4_FLOW: /* For UDP either 2-tuple hash or 4-tuple hash is supported */ if (info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) { set_caps = QED_RSS_IPV4_UDP; DP_VERBOSE(edev, QED_MSG_DEBUG, "UDP 4-tuple enabled\n"); } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { clr_caps = QED_RSS_IPV4_UDP; DP_VERBOSE(edev, QED_MSG_DEBUG, "UDP 4-tuple disabled\n"); } else { return -EINVAL; } break; case UDP_V6_FLOW: /* For UDP either 2-tuple hash or 4-tuple hash is supported */ if (info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) { set_caps = QED_RSS_IPV6_UDP; DP_VERBOSE(edev, QED_MSG_DEBUG, "UDP 4-tuple enabled\n"); } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { clr_caps = QED_RSS_IPV6_UDP; DP_VERBOSE(edev, QED_MSG_DEBUG, "UDP 4-tuple disabled\n"); } else { return -EINVAL; } break; case IPV4_FLOW: case IPV6_FLOW: /* For IP only 2-tuple hash is supported */ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) { DP_INFO(edev, "Command parameters not supported\n"); return -EINVAL; } return 0; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: 
case ESP_V4_FLOW: case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case IP_USER_FLOW: case ETHER_FLOW: /* RSS is not supported for these protocols */ if (info->data) { DP_INFO(edev, "Command parameters not supported\n"); return -EINVAL; } return 0; default: return -EINVAL; } /* No action is needed if there is no change in the rss capability */ if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps)) return 0; /* Update internal configuration */ edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps); edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; /* Re-configure if possible */ __qede_lock(edev); if (edev->state == QEDE_STATE_OPEN) { vport_update_params = vzalloc(sizeof(*vport_update_params)); if (!vport_update_params) { __qede_unlock(edev); return -ENOMEM; } qede_fill_rss_params(edev, &vport_update_params->rss_params, &vport_update_params->update_rss_flg); rc = edev->ops->vport_update(edev->cdev, vport_update_params); vfree(vport_update_params); } __qede_unlock(edev); return rc; } static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) { struct qede_dev *edev = netdev_priv(dev); int rc; switch (info->cmd) { case ETHTOOL_SRXFH: rc = qede_set_rss_flags(edev, info); break; case ETHTOOL_SRXCLSRLINS: rc = qede_add_cls_rule(edev, info); break; case ETHTOOL_SRXCLSRLDEL: rc = qede_delete_flow_filter(edev, info->fs.location); break; default: DP_INFO(edev, "Command parameters not supported\n"); rc = -EOPNOTSUPP; } return rc; } static u32 qede_get_rxfh_indir_size(struct net_device *dev) { return QED_RSS_IND_TABLE_SIZE; } static u32 qede_get_rxfh_key_size(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); return sizeof(edev->rss_key); } static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct qede_dev *edev = netdev_priv(dev); int i; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; if (!indir) return 0; for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) indir[i] = edev->rss_ind_table[i]; if (key) memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev)); return 0; } static int qede_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct qed_update_vport_params *vport_update_params; struct qede_dev *edev = netdev_priv(dev); int i, rc = 0; if (edev->dev_info.common.num_hwfns > 1) { DP_INFO(edev, "RSS configuration is not supported for 100G devices\n"); return -EOPNOTSUPP; } if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; if (!indir && !key) return 0; if (indir) { for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) edev->rss_ind_table[i] = indir[i]; edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; } if (key) { memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev)); edev->rss_params_inited |= QEDE_RSS_KEY_INITED; } __qede_lock(edev); if (edev->state == QEDE_STATE_OPEN) { vport_update_params = vzalloc(sizeof(*vport_update_params)); if (!vport_update_params) { __qede_unlock(edev); return -ENOMEM; } qede_fill_rss_params(edev, &vport_update_params->rss_params, &vport_update_params->update_rss_flg); rc = edev->ops->vport_update(edev->cdev, vport_update_params); vfree(vport_update_params); } __qede_unlock(edev); return rc; } /* This function enables the interrupt generation and the NAPI on the device */ static void qede_netif_start(struct qede_dev *edev) { int i; if (!netif_running(edev->ndev)) return; for_each_queue(i) { /* Update and reenable interrupts */ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1); napi_enable(&edev->fp_array[i].napi); 
} } /* This function disables the NAPI and the interrupt generation on the device */ static void qede_netif_stop(struct qede_dev *edev) { int i; for_each_queue(i) { napi_disable(&edev->fp_array[i].napi); /* Disable interrupts */ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0); } } static int qede_selftest_transmit_traffic(struct qede_dev *edev, struct sk_buff *skb) { struct qede_tx_queue *txq = NULL; struct eth_tx_1st_bd *first_bd; dma_addr_t mapping; int i, idx; u16 val; for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; if (fp->type & QEDE_FASTPATH_TX) { txq = QEDE_FP_TC0_TXQ(fp); break; } } if (!txq) { DP_NOTICE(edev, "Tx path is not available\n"); return -1; } /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = txq->sw_tx_prod; txq->sw_tx_ring.skbs[idx].skb = skb; first_bd = qed_chain_produce(&txq->tx_pbl); memset(first_bd, 0, sizeof(*first_bd)); val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; first_bd->data.bd_flags.bitfields = val; val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK; val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; first_bd->data.bitfields |= cpu_to_le16(val); /* Map skb linear data for DMA and set in the first BD */ mapping = dma_map_single(&edev->pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { DP_NOTICE(edev, "SKB mapping failed\n"); return -ENOMEM; } BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); /* update the first BD with the actual num BDs */ first_bd->data.nbds = 1; txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; /* 'next page' entries are counted in the producer value */ val = qed_chain_get_prod_idx(&txq->tx_pbl); txq->tx_db.data.bd_prod = cpu_to_le16(val); /* wmb makes sure that the BDs data is updated before updating the * producer, otherwise FW may read old data from the BDs. */ wmb(); barrier(); writel(txq->tx_db.raw, txq->doorbell_addr); for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { if (qede_txq_has_work(txq)) break; usleep_range(100, 200); } if (!qede_txq_has_work(txq)) { DP_NOTICE(edev, "Tx completion didn't happen\n"); return -1; } first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; txq->sw_tx_ring.skbs[idx].skb = NULL; return 0; } static int qede_selftest_receive_traffic(struct qede_dev *edev) { u16 sw_rx_index, len; struct eth_fast_path_rx_reg_cqe *fp_cqe; struct qede_rx_queue *rxq = NULL; struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; int i, iter, rc = 0; u8 *data_ptr; for_each_queue(i) { if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { rxq = edev->fp_array[i].rxq; break; } } if (!rxq) { DP_NOTICE(edev, "Rx path is not available\n"); return -1; } /* The packet is expected to receive on rx-queue 0 even though RSS is * enabled. This is because the queue 0 is configured as the default * queue and that the loopback traffic is not IP. 
*/ for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) { if (!qede_has_rx_work(rxq)) { usleep_range(100, 200); continue; } /* Get the CQE from the completion ring */ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); /* Get the data from the SW ring */ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; fp_cqe = &cqe->fast_path_regular; len = le16_to_cpu(fp_cqe->len_on_first_bd); data_ptr = (u8 *)(page_address(sw_rx_data->data) + fp_cqe->placement_offset + sw_rx_data->page_offset + rxq->rx_headroom); if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) && ether_addr_equal(data_ptr + ETH_ALEN, edev->ndev->dev_addr)) { for (i = ETH_HLEN; i < len; i++) if (data_ptr[i] != (unsigned char)(i & 0xff)) { rc = -1; break; } qede_recycle_rx_bd_ring(rxq, 1); qed_chain_recycle_consumed(&rxq->rx_comp_ring); break; } DP_INFO(edev, "Not the transmitted packet\n"); qede_recycle_rx_bd_ring(rxq, 1); qed_chain_recycle_consumed(&rxq->rx_comp_ring); } if (iter == QEDE_SELFTEST_POLL_COUNT) { DP_NOTICE(edev, "Failed to receive the traffic\n"); return -1; } qede_update_rx_prod(edev, rxq); return rc; } static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) { struct qed_link_params link_params; struct sk_buff *skb = NULL; int rc = 0, i; u32 pkt_size; u8 *packet; if (!netif_running(edev->ndev)) { DP_NOTICE(edev, "Interface is down\n"); return -EINVAL; } qede_netif_stop(edev); /* Bring up the link in Loopback mode */ memset(&link_params, 0, sizeof(link_params)); link_params.link_up = true; link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE; link_params.loopback_mode = loopback_mode; edev->ops->common->set_link(edev->cdev, &link_params); /* Wait for loopback configuration to apply */ msleep_interruptible(500); /* Setting max packet size to 1.5K to avoid data being split over * multiple BDs in cases where MTU > PAGE_SIZE. */ pkt_size = (((edev->ndev->mtu < ETH_DATA_LEN) ? 
edev->ndev->mtu : ETH_DATA_LEN) + ETH_HLEN); skb = netdev_alloc_skb(edev->ndev, pkt_size); if (!skb) { DP_INFO(edev, "Can't allocate skb\n"); rc = -ENOMEM; goto test_loopback_exit; } packet = skb_put(skb, pkt_size); ether_addr_copy(packet, edev->ndev->dev_addr); ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr); memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN))); for (i = ETH_HLEN; i < pkt_size; i++) packet[i] = (unsigned char)(i & 0xff); rc = qede_selftest_transmit_traffic(edev, skb); if (rc) goto test_loopback_exit; rc = qede_selftest_receive_traffic(edev); if (rc) goto test_loopback_exit; DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n"); test_loopback_exit: dev_kfree_skb(skb); /* Bring up the link in Normal mode */ memset(&link_params, 0, sizeof(link_params)); link_params.link_up = true; link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE; link_params.loopback_mode = QED_LINK_LOOPBACK_NONE; edev->ops->common->set_link(edev->cdev, &link_params); /* Wait for loopback configuration to apply */ msleep_interruptible(500); qede_netif_start(edev); return rc; } static void qede_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) { struct qede_dev *edev = netdev_priv(dev); DP_VERBOSE(edev, QED_MSG_DEBUG, "Self-test command parameters: offline = %d, external_lb = %d\n", (etest->flags & ETH_TEST_FL_OFFLINE), (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2); memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX); if (etest->flags & ETH_TEST_FL_OFFLINE) { if (qede_selftest_run_loopback(edev, QED_LINK_LOOPBACK_INT_PHY)) { buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1; etest->flags |= ETH_TEST_FL_FAILED; } } if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) { buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1; etest->flags |= ETH_TEST_FL_FAILED; } if (edev->ops->common->selftest->selftest_memory(edev->cdev)) { buf[QEDE_ETHTOOL_MEMORY_TEST] = 1; etest->flags |= ETH_TEST_FL_FAILED; } if (edev->ops->common->selftest->selftest_register(edev->cdev)) { buf[QEDE_ETHTOOL_REGISTER_TEST] = 1; etest->flags |= ETH_TEST_FL_FAILED; } if (edev->ops->common->selftest->selftest_clock(edev->cdev)) { buf[QEDE_ETHTOOL_CLOCK_TEST] = 1; etest->flags |= ETH_TEST_FL_FAILED; } if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) { buf[QEDE_ETHTOOL_NVRAM_TEST] = 1; etest->flags |= ETH_TEST_FL_FAILED; } } static int qede_set_tunable(struct net_device *dev, const struct ethtool_tunable *tuna, const void *data) { struct qede_dev *edev = netdev_priv(dev); u32 val; switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: val = *(u32 *)data; if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) { DP_VERBOSE(edev, QED_MSG_DEBUG, "Invalid rx copy break value, range is [%u, %u]", QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE); return -EINVAL; } edev->rx_copybreak = *(u32 *)data; break; default: return -EOPNOTSUPP; } return 0; } static int qede_get_tunable(struct net_device *dev, const struct ethtool_tunable *tuna, void *data) { struct qede_dev *edev = netdev_priv(dev); switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: *(u32 *)data = edev->rx_copybreak; break; default: return -EOPNOTSUPP; } return 0; } static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata) { struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; memset(&current_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, &current_link); if (!current_link.eee_supported) { DP_INFO(edev, "EEE is not supported\n"); return -EOPNOTSUPP; } if (current_link.eee.adv_caps & 
QED_EEE_1G_ADV) edata->advertised = ADVERTISED_1000baseT_Full; if (current_link.eee.adv_caps & QED_EEE_10G_ADV) edata->advertised |= ADVERTISED_10000baseT_Full; if (current_link.sup_caps & QED_EEE_1G_ADV) edata->supported = ADVERTISED_1000baseT_Full; if (current_link.sup_caps & QED_EEE_10G_ADV) edata->supported |= ADVERTISED_10000baseT_Full; if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV) edata->lp_advertised = ADVERTISED_1000baseT_Full; if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV) edata->lp_advertised |= ADVERTISED_10000baseT_Full; edata->tx_lpi_timer = current_link.eee.tx_lpi_timer; edata->eee_enabled = current_link.eee.enable; edata->tx_lpi_enabled = current_link.eee.tx_lpi_enable; edata->eee_active = current_link.eee_active; return 0; } static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) { struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; struct qed_link_params params; if (!edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } memset(&current_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, &current_link); if (!current_link.eee_supported) { DP_INFO(edev, "EEE is not supported\n"); return -EOPNOTSUPP; } memset(&params, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG; if (!(edata->advertised & (ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full)) || ((edata->advertised & (ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full)) != edata->advertised)) { DP_VERBOSE(edev, QED_MSG_DEBUG, "Invalid advertised capabilities %d\n", edata->advertised); return -EINVAL; } if (edata->advertised & ADVERTISED_1000baseT_Full) params.eee.adv_caps = QED_EEE_1G_ADV; if (edata->advertised & ADVERTISED_10000baseT_Full) params.eee.adv_caps |= QED_EEE_10G_ADV; params.eee.enable = edata->eee_enabled; params.eee.tx_lpi_enable = edata->tx_lpi_enabled; params.eee.tx_lpi_timer = edata->tx_lpi_timer; params.link_up = true; edev->ops->common->set_link(edev->cdev, &params); return 0; } static u32 qede_link_to_ethtool_fec(u32 link_fec) { u32 eth_fec = 0; if (link_fec & QED_FEC_MODE_NONE) eth_fec |= ETHTOOL_FEC_OFF; if (link_fec & QED_FEC_MODE_FIRECODE) eth_fec |= ETHTOOL_FEC_BASER; if (link_fec & QED_FEC_MODE_RS) eth_fec |= ETHTOOL_FEC_RS; if (link_fec & QED_FEC_MODE_AUTO) eth_fec |= ETHTOOL_FEC_AUTO; if (link_fec & QED_FEC_MODE_UNSUPPORTED) eth_fec |= ETHTOOL_FEC_NONE; return eth_fec; } static u32 qede_ethtool_to_link_fec(u32 eth_fec) { u32 link_fec = 0; if (eth_fec & ETHTOOL_FEC_OFF) link_fec |= QED_FEC_MODE_NONE; if (eth_fec & ETHTOOL_FEC_BASER) link_fec |= QED_FEC_MODE_FIRECODE; if (eth_fec & ETHTOOL_FEC_RS) link_fec |= QED_FEC_MODE_RS; if (eth_fec & ETHTOOL_FEC_AUTO) link_fec |= QED_FEC_MODE_AUTO; if (eth_fec & ETHTOOL_FEC_NONE) link_fec |= QED_FEC_MODE_UNSUPPORTED; return link_fec; } static int qede_get_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) { struct qede_dev *edev = netdev_priv(dev); struct qed_link_output curr_link; memset(&curr_link, 0, sizeof(curr_link)); edev->ops->common->get_link(edev->cdev, &curr_link); fecparam->active_fec = qede_link_to_ethtool_fec(curr_link.active_fec); fecparam->fec = qede_link_to_ethtool_fec(curr_link.sup_fec); return 0; } static int qede_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) { struct qede_dev *edev = netdev_priv(dev); struct qed_link_params params; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { 
DP_INFO(edev, "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } memset(&params, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_FEC_CONFIG; params.fec = qede_ethtool_to_link_fec(fecparam->fec); params.link_up = true; edev->ops->common->set_link(edev->cdev, &params); return 0; } static int qede_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { struct qede_dev *edev = netdev_priv(dev); u8 buf[4]; int rc; /* Read first 4 bytes to find the sfp type */ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, QED_I2C_DEV_ADDR_A0, 0, 4); if (rc) { DP_ERR(edev, "Failed reading EEPROM data %d\n", rc); return rc; } switch (buf[0]) { case 0x3: /* SFP, SFP+, SFP-28 */ modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; break; case 0xc: /* QSFP */ case 0xd: /* QSFP+ */ modinfo->type = ETH_MODULE_SFF_8436; modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; break; case 0x11: /* QSFP-28 */ modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; break; default: DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]); return -EINVAL; } return 0; } static int qede_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *ee, u8 *data) { struct qede_dev *edev = netdev_priv(dev); u32 start_addr = ee->offset, size = 0; u8 *buf = data; int rc = 0; /* Read A0 section */ if (ee->offset < ETH_MODULE_SFF_8079_LEN) { /* Limit transfer size to the A0 section boundary */ if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN) size = ETH_MODULE_SFF_8079_LEN - ee->offset; else size = ee->len; rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, QED_I2C_DEV_ADDR_A0, start_addr, size); if (rc) { DP_ERR(edev, "Failed reading A0 section %d\n", rc); return rc; } buf += size; start_addr += size; } /* Read A2 section */ if (start_addr >= ETH_MODULE_SFF_8079_LEN && start_addr < ETH_MODULE_SFF_8472_LEN) { size = ee->len - size; /* Limit transfer size to the A2 section boundary */ if (start_addr + size > ETH_MODULE_SFF_8472_LEN) size = ETH_MODULE_SFF_8472_LEN - start_addr; start_addr -= ETH_MODULE_SFF_8079_LEN; rc = edev->ops->common->read_module_eeprom(edev->cdev, buf, QED_I2C_DEV_ADDR_A2, start_addr, size); if (rc) { DP_VERBOSE(edev, QED_MSG_DEBUG, "Failed reading A2 section %d\n", rc); return 0; } } return rc; } static int qede_set_dump(struct net_device *dev, struct ethtool_dump *val) { struct qede_dev *edev = netdev_priv(dev); int rc = 0; if (edev->dump_info.cmd == QEDE_DUMP_CMD_NONE) { if (val->flag > QEDE_DUMP_CMD_MAX) { DP_ERR(edev, "Invalid command %d\n", val->flag); return -EINVAL; } edev->dump_info.cmd = val->flag; edev->dump_info.num_args = 0; return 0; } if (edev->dump_info.num_args == QEDE_DUMP_MAX_ARGS) { DP_ERR(edev, "Arg count = %d\n", edev->dump_info.num_args); return -EINVAL; } switch (edev->dump_info.cmd) { case QEDE_DUMP_CMD_NVM_CFG: edev->dump_info.args[edev->dump_info.num_args] = val->flag; edev->dump_info.num_args++; break; case QEDE_DUMP_CMD_GRCDUMP: rc = edev->ops->common->set_grc_config(edev->cdev, val->flag, 1); break; default: break; } return rc; } static int qede_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) { struct qede_dev *edev = netdev_priv(dev); if (!edev->ops || !edev->ops->common) { DP_ERR(edev, "Edev ops not populated\n"); return -EINVAL; } dump->version = QEDE_DUMP_VERSION; switch (edev->dump_info.cmd) { case QEDE_DUMP_CMD_NVM_CFG: dump->flag = QEDE_DUMP_CMD_NVM_CFG; dump->len = edev->ops->common->read_nvm_cfg_len(edev->cdev, 
edev->dump_info.args[0]); break; case QEDE_DUMP_CMD_GRCDUMP: dump->flag = QEDE_DUMP_CMD_GRCDUMP; dump->len = edev->ops->common->dbg_all_data_size(edev->cdev); break; default: DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd); return -EINVAL; } DP_VERBOSE(edev, QED_MSG_DEBUG, "dump->version = 0x%x dump->flag = %d dump->len = %d\n", dump->version, dump->flag, dump->len); return 0; } static int qede_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, void *buf) { struct qede_dev *edev = netdev_priv(dev); int rc = 0; if (!edev->ops || !edev->ops->common) { DP_ERR(edev, "Edev ops not populated\n"); rc = -EINVAL; goto err; } switch (edev->dump_info.cmd) { case QEDE_DUMP_CMD_NVM_CFG: if (edev->dump_info.num_args != QEDE_DUMP_NVM_ARG_COUNT) { DP_ERR(edev, "Arg count = %d required = %d\n", edev->dump_info.num_args, QEDE_DUMP_NVM_ARG_COUNT); rc = -EINVAL; goto err; } rc = edev->ops->common->read_nvm_cfg(edev->cdev, (u8 **)&buf, edev->dump_info.args[0], edev->dump_info.args[1]); break; case QEDE_DUMP_CMD_GRCDUMP: memset(buf, 0, dump->len); rc = edev->ops->common->dbg_all_data(edev->cdev, buf); break; default: DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd); rc = -EINVAL; break; } err: edev->dump_info.cmd = QEDE_DUMP_CMD_NONE; edev->dump_info.num_args = 0; memset(edev->dump_info.args, 0, sizeof(edev->dump_info.args)); return rc; } int qede_set_per_coalesce(struct net_device *dev, u32 queue, struct ethtool_coalesce *coal) { struct qede_dev *edev = netdev_priv(dev); struct qede_fastpath *fp; u16 rxc, txc; int rc = 0; if (coal->rx_coalesce_usecs > QED_COALESCE_MAX || coal->tx_coalesce_usecs > QED_COALESCE_MAX) { DP_INFO(edev, "Can't support requested %s coalesce value [max supported value %d]\n", coal->rx_coalesce_usecs > QED_COALESCE_MAX ? 
"rx" : "tx", QED_COALESCE_MAX); return -EINVAL; } rxc = (u16)coal->rx_coalesce_usecs; txc = (u16)coal->tx_coalesce_usecs; __qede_lock(edev); if (queue >= edev->num_queues) { DP_INFO(edev, "Invalid queue\n"); rc = -EINVAL; goto out; } if (edev->state != QEDE_STATE_OPEN) { rc = -EINVAL; goto out; } fp = &edev->fp_array[queue]; if (edev->fp_array[queue].type & QEDE_FASTPATH_RX) { rc = edev->ops->common->set_coalesce(edev->cdev, rxc, 0, fp->rxq->handle); if (rc) { DP_INFO(edev, "Set RX coalesce error, rc = %d\n", rc); goto out; } edev->coal_entry[queue].rxc = rxc; edev->coal_entry[queue].isvalid = true; } if (edev->fp_array[queue].type & QEDE_FASTPATH_TX) { rc = edev->ops->common->set_coalesce(edev->cdev, 0, txc, fp->txq->handle); if (rc) { DP_INFO(edev, "Set TX coalesce error, rc = %d\n", rc); goto out; } edev->coal_entry[queue].txc = txc; edev->coal_entry[queue].isvalid = true; } out: __qede_unlock(edev); return rc; } static int qede_get_per_coalesce(struct net_device *dev, u32 queue, struct ethtool_coalesce *coal) { void *rx_handle = NULL, *tx_handle = NULL; struct qede_dev *edev = netdev_priv(dev); struct qede_fastpath *fp; u16 rx_coal, tx_coal; int rc = 0; rx_coal = QED_DEFAULT_RX_USECS; tx_coal = QED_DEFAULT_TX_USECS; memset(coal, 0, sizeof(struct ethtool_coalesce)); __qede_lock(edev); if (queue >= edev->num_queues) { DP_INFO(edev, "Invalid queue\n"); rc = -EINVAL; goto out; } if (edev->state != QEDE_STATE_OPEN) { rc = -EINVAL; goto out; } fp = &edev->fp_array[queue]; if (fp->type & QEDE_FASTPATH_RX) rx_handle = fp->rxq->handle; rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle); if (rc) { DP_INFO(edev, "Read Rx coalesce error\n"); goto out; } fp = &edev->fp_array[queue]; if (fp->type & QEDE_FASTPATH_TX) tx_handle = fp->txq->handle; rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle); if (rc) DP_INFO(edev, "Read Tx coalesce error\n"); out: __qede_unlock(edev); coal->rx_coalesce_usecs = rx_coal; coal->tx_coalesce_usecs = tx_coal; return rc; } static const struct ethtool_ops qede_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_STATS_BLOCK_USECS, .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, .get_drvinfo = qede_get_drvinfo, .get_regs_len = qede_get_regs_len, .get_regs = qede_get_regs, .get_wol = qede_get_wol, .set_wol = qede_set_wol, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, .nway_reset = qede_nway_reset, .get_link = qede_get_link, .get_coalesce = qede_get_coalesce, .set_coalesce = qede_set_coalesce, .get_ringparam = qede_get_ringparam, .set_ringparam = qede_set_ringparam, .get_pauseparam = qede_get_pauseparam, .set_pauseparam = qede_set_pauseparam, .get_strings = qede_get_strings, .set_phys_id = qede_set_phys_id, .get_ethtool_stats = qede_get_ethtool_stats, .get_priv_flags = qede_get_priv_flags, .set_priv_flags = qede_set_priv_flags, .get_sset_count = qede_get_sset_count, .get_rxnfc = qede_get_rxnfc, .set_rxnfc = qede_set_rxnfc, .get_rxfh_indir_size = qede_get_rxfh_indir_size, .get_rxfh_key_size = qede_get_rxfh_key_size, .get_rxfh = qede_get_rxfh, .set_rxfh = qede_set_rxfh, .get_ts_info = qede_get_ts_info, .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, .get_module_info = qede_get_module_info, .get_module_eeprom = qede_get_module_eeprom, .get_eee = qede_get_eee, .set_eee = qede_set_eee, .get_fecparam = qede_get_fecparam, .set_fecparam = qede_set_fecparam, .get_tunable = qede_get_tunable, .set_tunable = 
qede_set_tunable, .get_per_queue_coalesce = qede_get_per_coalesce, .set_per_queue_coalesce = qede_set_per_coalesce, .flash_device = qede_flash_device, .get_dump_flag = qede_get_dump_flag, .get_dump_data = qede_get_dump_data, .set_dump = qede_set_dump, }; static const struct ethtool_ops qede_vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_STATS_BLOCK_USECS, .get_link_ksettings = qede_get_link_ksettings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, .get_link = qede_get_link, .get_coalesce = qede_get_coalesce, .set_coalesce = qede_set_coalesce, .get_ringparam = qede_get_ringparam, .set_ringparam = qede_set_ringparam, .get_strings = qede_get_strings, .get_ethtool_stats = qede_get_ethtool_stats, .get_priv_flags = qede_get_priv_flags, .get_sset_count = qede_get_sset_count, .get_rxnfc = qede_get_rxnfc, .set_rxnfc = qede_set_rxnfc, .get_rxfh_indir_size = qede_get_rxfh_indir_size, .get_rxfh_key_size = qede_get_rxfh_key_size, .get_rxfh = qede_get_rxfh, .set_rxfh = qede_set_rxfh, .get_channels = qede_get_channels, .set_channels = qede_set_channels, .get_per_queue_coalesce = qede_get_per_coalesce, .set_per_queue_coalesce = qede_set_per_coalesce, .get_tunable = qede_get_tunable, .set_tunable = qede_set_tunable, }; void qede_set_ethtool_ops(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); if (IS_VF(edev)) dev->ethtool_ops = &qede_vf_ethtool_ops; else dev->ethtool_ops = &qede_ethtool_ops; }
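The ethtool_ops tables registered by qede_set_ethtool_ops() above are only reachable through the kernel's ethtool plumbing, so a quick way to see a callback such as qede_get_coalesce() exercised is from user space. The following is a minimal, hypothetical sketch (not part of the driver) that reads the coalescing parameters through the legacy SIOCETHTOOL ioctl; the interface name "eth0" is an assumption, and current kernels also expose the same data via the ethtool netlink API.

/* Hypothetical user-space sketch: read the coalescing parameters that
 * the driver's .get_coalesce callback serves, via the legacy
 * SIOCETHTOOL ioctl. "eth0" is an assumed interface name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ecoal = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecoal;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GCOALESCE");
		close(fd);
		return 1;
	}

	printf("rx-usecs=%u tx-usecs=%u stats-block-usecs=%u\n",
	       ecoal.rx_coalesce_usecs, ecoal.tx_coalesce_usecs,
	       ecoal.stats_block_coalesce_usecs);
	close(fd);
	return 0;
}

Built with any C compiler (e.g. gcc -o gcoal gcoal.c), this prints the same rx/tx/stats values the driver fills into struct ethtool_coalesce.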
linux-master
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
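Staying with the ethtool file above before the fast-path file begins: the RXH_* bits that qede_get_rss_flags() reports for ETHTOOL_GRXFH can be inspected the same way. The sketch below is hypothetical user-space code, again assuming an interface named "eth0", and only queries the TCP/IPv4 flow type handled in that callback.

/* Hypothetical user-space sketch: ask which header fields feed the RSS
 * hash for TCP over IPv4, i.e. the data the driver's ETHTOOL_GRXFH
 * handler returns. "eth0" is an assumed interface name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_GRXFH,
		.flow_type = TCP_V4_FLOW,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRXFH");
		close(fd);
		return 1;
	}

	printf("tcp4 hash fields: %s%s%s%s\n",
	       (nfc.data & RXH_IP_SRC) ? "ip-src " : "",
	       (nfc.data & RXH_IP_DST) ? "ip-dst " : "",
	       (nfc.data & RXH_L4_B_0_1) ? "l4-src-port " : "",
	       (nfc.data & RXH_L4_B_2_3) ? "l4-dst-port" : "");
	close(fd);
	return 0;
}

For TCP the driver always reports the 4-tuple (IP source/destination plus L4 ports), which matches the validation qede_set_rss_flags() applies when user space tries to change it.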
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/bpf_trace.h> #include <net/udp_tunnel.h> #include <linux/ip.h> #include <net/gro.h> #include <net/ipv6.h> #include <net/tcp.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <net/ip6_checksum.h> #include "qede_ptp.h" #include <linux/qed/qed_if.h> #include "qede.h" /********************************* * Content also used by slowpath * *********************************/ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy) { struct sw_rx_data *sw_rx_data; struct eth_rx_bd *rx_bd; dma_addr_t mapping; struct page *data; /* In case lazy-allocation is allowed, postpone allocation until the * end of the NAPI run. We'd still need to make sure the Rx ring has * sufficient buffers to guarantee an additional Rx interrupt. */ if (allow_lazy && likely(rxq->filled_buffers > 12)) { rxq->filled_buffers--; return 0; } data = alloc_pages(GFP_ATOMIC, 0); if (unlikely(!data)) return -ENOMEM; /* Map the entire page as it would be used * for multiple RX buffer segment size mapping. */ mapping = dma_map_page(rxq->dev, data, 0, PAGE_SIZE, rxq->data_direction); if (unlikely(dma_mapping_error(rxq->dev, mapping))) { __free_page(data); return -ENOMEM; } sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; sw_rx_data->page_offset = 0; sw_rx_data->data = data; sw_rx_data->mapping = mapping; /* Advance PROD and get BD pointer */ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); WARN_ON(!rx_bd); rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) + rxq->rx_headroom); rxq->sw_rx_prod++; rxq->filled_buffers++; return 0; } /* Unmap the data and free skb */ int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len) { u16 idx = txq->sw_tx_cons; struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; struct eth_tx_1st_bd *first_bd; struct eth_tx_bd *tx_data_bd; int bds_consumed = 0; int nbds; bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; int i, split_bd_len = 0; if (unlikely(!skb)) { DP_ERR(edev, "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", idx, txq->sw_tx_cons, txq->sw_tx_prod); return -1; } *len = skb->len; first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); bds_consumed++; nbds = first_bd->data.nbds; if (data_split) { struct eth_tx_bd *split = (struct eth_tx_bd *) qed_chain_consume(&txq->tx_pbl); split_bd_len = BD_UNMAP_LEN(split); bds_consumed++; } dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); /* Unmap the data of the skb frags */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { tx_data_bd = (struct eth_tx_bd *) qed_chain_consume(&txq->tx_pbl); dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); } while (bds_consumed++ < nbds) qed_chain_consume(&txq->tx_pbl); /* Free skb */ dev_kfree_skb_any(skb); txq->sw_tx_ring.skbs[idx].skb = NULL; txq->sw_tx_ring.skbs[idx].flags = 0; return 0; } /* Unmap the data and free skb when mapping failed during start_xmit */ static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq, struct eth_tx_1st_bd *first_bd, int nbd, bool data_split) { u16 idx = txq->sw_tx_prod; struct sk_buff 
*skb = txq->sw_tx_ring.skbs[idx].skb; struct eth_tx_bd *tx_data_bd; int i, split_bd_len = 0; /* Return prod to its position before this skb was handled */ qed_chain_set_prod(&txq->tx_pbl, le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); if (data_split) { struct eth_tx_bd *split = (struct eth_tx_bd *) qed_chain_produce(&txq->tx_pbl); split_bd_len = BD_UNMAP_LEN(split); nbd--; } dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); /* Unmap the data of the skb frags */ for (i = 0; i < nbd; i++) { tx_data_bd = (struct eth_tx_bd *) qed_chain_produce(&txq->tx_pbl); if (tx_data_bd->nbytes) dma_unmap_page(txq->dev, BD_UNMAP_ADDR(tx_data_bd), BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); } /* Return again prod to its position before this skb was handled */ qed_chain_set_prod(&txq->tx_pbl, le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); /* Free skb */ dev_kfree_skb_any(skb); txq->sw_tx_ring.skbs[idx].skb = NULL; txq->sw_tx_ring.skbs[idx].flags = 0; } static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext) { u32 rc = XMIT_L4_CSUM; __be16 l3_proto; if (skb->ip_summed != CHECKSUM_PARTIAL) return XMIT_PLAIN; l3_proto = vlan_get_protocol(skb); if (l3_proto == htons(ETH_P_IPV6) && (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) *ipv6_ext = 1; if (skb->encapsulation) { rc |= XMIT_ENC; if (skb_is_gso(skb)) { unsigned short gso_type = skb_shinfo(skb)->gso_type; if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) || (gso_type & SKB_GSO_GRE_CSUM)) rc |= XMIT_ENC_GSO_L4_CSUM; rc |= XMIT_LSO; return rc; } } if (skb_is_gso(skb)) rc |= XMIT_LSO; return rc; } static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, struct eth_tx_2nd_bd *second_bd, struct eth_tx_3rd_bd *third_bd) { u8 l4_proto; u16 bd2_bits1 = 0, bd2_bits2 = 0; bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) l4_proto = ipv6_hdr(skb)->nexthdr; else l4_proto = ip_hdr(skb)->protocol; if (l4_proto == IPPROTO_UDP) bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; if (third_bd) third_bd->data.bitfields |= cpu_to_le16(((tcp_hdrlen(skb) / 4) & ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); } static int map_frag_to_bd(struct qede_tx_queue *txq, skb_frag_t *frag, struct eth_tx_bd *bd) { dma_addr_t mapping; /* Map skb non-linear frag data for DMA */ mapping = skb_frag_dma_map(txq->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(txq->dev, mapping))) return -ENOMEM; /* Setup the data pointer of the frag data */ BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); return 0; } static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) { if (is_encap_pkt) return skb_inner_tcp_all_headers(skb); return skb_tcp_all_headers(skb); } /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type) { int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; if (xmit_type & XMIT_LSO) { int hlen; hlen = qede_get_skb_hlen(skb, 
xmit_type & XMIT_ENC); /* linear payload would require its own BD */ if (skb_headlen(skb) > hlen) allowed_frags--; } return (skb_shinfo(skb)->nr_frags > allowed_frags); } #endif static inline void qede_update_tx_producer(struct qede_tx_queue *txq) { /* wmb makes sure that the BDs data is updated before updating the * producer, otherwise FW may read old data from the BDs. */ wmb(); barrier(); writel(txq->tx_db.raw, txq->doorbell_addr); /* Fence required to flush the write combined buffer, since another * CPU may write to the same doorbell address and data may be lost * due to relaxed order nature of write combined bar. */ wmb(); } static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad, u16 len, struct page *page, struct xdp_frame *xdpf) { struct eth_tx_1st_bd *bd; struct sw_tx_xdp *xdp; u16 val; if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >= txq->num_tx_buffers)) { txq->stopped_cnt++; return -ENOMEM; } bd = qed_chain_produce(&txq->tx_pbl); bd->data.nbds = 1; bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; bd->data.bitfields = cpu_to_le16(val); /* We can safely ignore the offset, as it's 0 for XDP */ BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len); xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; xdp->mapping = dma; xdp->page = page; xdp->xdpf = xdpf; txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; return 0; } int qede_xdp_transmit(struct net_device *dev, int n_frames, struct xdp_frame **frames, u32 flags) { struct qede_dev *edev = netdev_priv(dev); struct device *dmadev = &edev->pdev->dev; struct qede_tx_queue *xdp_tx; struct xdp_frame *xdpf; dma_addr_t mapping; int i, nxmit = 0; u16 xdp_prod; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; if (unlikely(!netif_running(dev))) return -ENETDOWN; i = smp_processor_id() % edev->total_xdp_queues; xdp_tx = edev->fp_array[i].xdp_tx; spin_lock(&xdp_tx->xdp_tx_lock); for (i = 0; i < n_frames; i++) { xdpf = frames[i]; mapping = dma_map_single(dmadev, xdpf->data, xdpf->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dmadev, mapping))) break; if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len, NULL, xdpf))) break; nxmit++; } if (flags & XDP_XMIT_FLUSH) { xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl); xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); qede_update_tx_producer(xdp_tx); } spin_unlock(&xdp_tx->xdp_tx_lock); return nxmit; } int qede_txq_has_work(struct qede_tx_queue *txq) { u16 hw_bd_cons; /* Tell compiler that consumer and producer can change */ barrier(); hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) return 0; return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); } static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; struct device *dev = &edev->pdev->dev; struct xdp_frame *xdpf; u16 hw_bd_cons; hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); barrier(); while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { xdp_info = xdp_arr + txq->sw_tx_cons; xdpf = xdp_info->xdpf; if (xdpf) { dma_unmap_single(dev, xdp_info->mapping, xdpf->len, DMA_TO_DEVICE); xdp_return_frame(xdpf); xdp_info->xdpf = NULL; } else { dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(xdp_info->page); } qed_chain_consume(&txq->tx_pbl); txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; txq->xmit_pkts++; } } static int 
qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { unsigned int pkts_compl = 0, bytes_compl = 0; struct netdev_queue *netdev_txq; u16 hw_bd_cons; int rc; netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); barrier(); while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { int len = 0; rc = qede_free_tx_pkt(edev, txq, &len); if (rc) { DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n", hw_bd_cons, qed_chain_get_cons_idx(&txq->tx_pbl)); break; } bytes_compl += len; pkts_compl++; txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; txq->xmit_pkts++; } netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); /* Need to make the tx_bd_cons update visible to start_xmit() * before checking for netif_tx_queue_stopped(). Without the * memory barrier, there is a small possibility that * start_xmit() will miss it and cause the queue to be stopped * forever. * On the other hand we need an rmb() here to ensure the proper * ordering of bit testing in the following * netif_tx_queue_stopped(txq) call. */ smp_mb(); if (unlikely(netif_tx_queue_stopped(netdev_txq))) { /* Taking tx_lock is needed to prevent reenabling the queue * while it's empty. This could have happen if rx_action() gets * suspended in qede_tx_int() after the condition before * netif_tx_wake_queue(), while tx_action (qede_start_xmit()): * * stops the queue->sees fresh tx_bd_cons->releases the queue-> * sends some packets consuming the whole queue again-> * stops the queue */ __netif_tx_lock(netdev_txq, smp_processor_id()); if ((netif_tx_queue_stopped(netdev_txq)) && (edev->state == QEDE_STATE_OPEN) && (qed_chain_get_elem_left(&txq->tx_pbl) >= (MAX_SKB_FRAGS + 1))) { netif_tx_wake_queue(netdev_txq); DP_VERBOSE(edev, NETIF_MSG_TX_DONE, "Wake queue was called\n"); } __netif_tx_unlock(netdev_txq); } return 0; } bool qede_has_rx_work(struct qede_rx_queue *rxq) { u16 hw_comp_cons, sw_comp_cons; /* Tell compiler that status block fields can change */ barrier(); hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); return hw_comp_cons != sw_comp_cons; } static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) { qed_chain_consume(&rxq->rx_bd_ring); rxq->sw_rx_cons++; } /* This function reuses the buffer(from an offset) from * consumer index to producer index in the bd ring */ static inline void qede_reuse_page(struct qede_rx_queue *rxq, struct sw_rx_data *curr_cons) { struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); struct sw_rx_data *curr_prod; dma_addr_t new_mapping; curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; *curr_prod = *curr_cons; new_mapping = curr_prod->mapping + curr_prod->page_offset; rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) + rxq->rx_headroom); rxq->sw_rx_prod++; curr_cons->data = NULL; } /* In case of allocation failures reuse buffers * from consumer index to produce buffers for firmware */ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count) { struct sw_rx_data *curr_cons; for (; count > 0; count--) { curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; qede_reuse_page(rxq, curr_cons); qede_rx_bd_ring_consume(rxq); } } static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq, struct sw_rx_data *curr_cons) { /* Move to the next segment in the page */ curr_cons->page_offset += rxq->rx_buf_seg_size; if (curr_cons->page_offset == 
PAGE_SIZE) { if (unlikely(qede_alloc_rx_buffer(rxq, true))) { /* Since we failed to allocate new buffer * current buffer can be used again. */ curr_cons->page_offset -= rxq->rx_buf_seg_size; return -ENOMEM; } dma_unmap_page(rxq->dev, curr_cons->mapping, PAGE_SIZE, rxq->data_direction); } else { /* Increment refcount of the page as we don't want * network stack to take the ownership of the page * which can be recycled multiple times by the driver. */ page_ref_inc(curr_cons->data); qede_reuse_page(rxq, curr_cons); } return 0; } void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) { u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); struct eth_rx_prod_data rx_prods = {0}; /* Update producers */ rx_prods.bd_prod = cpu_to_le16(bd_prod); rx_prods.cqe_prod = cpu_to_le16(cqe_prod); /* Make sure that the BD and SGE data is updated before updating the * producers since FW might read the BD/SGE right after the producer * is updated. */ wmb(); internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), (u32 *)&rx_prods); } static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash) { enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE; enum rss_hash_type htype; u32 hash = 0; htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); if (htype) { hash_type = ((htype == RSS_HASH_TYPE_IPV4) || (htype == RSS_HASH_TYPE_IPV6)) ? PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4; hash = le32_to_cpu(rss_hash); } skb_set_hash(skb, hash, hash_type); } static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) { skb_checksum_none_assert(skb); if (csum_flag & QEDE_CSUM_UNNECESSARY) skb->ip_summed = CHECKSUM_UNNECESSARY; if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) { skb->csum_level = 1; skb->encapsulation = 1; } } static inline void qede_skb_receive(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_rx_queue *rxq, struct sk_buff *skb, u16 vlan_tag) { if (vlan_tag) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); napi_gro_receive(&fp->napi, skb); } static void qede_set_gro_params(struct qede_dev *edev, struct sk_buff *skb, struct eth_fast_path_rx_tpa_start_cqe *cqe) { u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; else skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - cqe->header_len; } static int qede_fill_frag_skb(struct qede_dev *edev, struct qede_rx_queue *rxq, u8 tpa_agg_index, u16 len_on_bd) { struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; struct sk_buff *skb = tpa_info->skb; if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) goto out; /* Add one frag and update the appropriate fields in the skb */ skb_fill_page_desc(skb, tpa_info->frag_id++, current_bd->data, current_bd->page_offset + rxq->rx_headroom, len_on_bd); if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { /* Incr page ref count to reuse on allocation failure * so that it doesn't get freed while freeing SKB. 
*/ page_ref_inc(current_bd->data); goto out; } qede_rx_bd_ring_consume(rxq); skb->data_len += len_on_bd; skb->truesize += rxq->rx_buf_seg_size; skb->len += len_on_bd; return 0; out: tpa_info->state = QEDE_AGG_STATE_ERROR; qede_recycle_rx_bd_ring(rxq, 1); return -ENOMEM; } static bool qede_tunn_exist(u16 flag) { return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT)); } static u8 qede_check_tunn_csum(u16 flag) { u16 csum_flag = 0; u8 tcsum = 0; if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT)) csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT; if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; tcsum = QEDE_TUNN_CSUM_UNNECESSARY; } csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; if (csum_flag & flag) return QEDE_CSUM_ERROR; return QEDE_CSUM_UNNECESSARY | tcsum; } static inline struct sk_buff * qede_build_skb(struct qede_rx_queue *rxq, struct sw_rx_data *bd, u16 len, u16 pad) { struct sk_buff *skb; void *buf; buf = page_address(bd->data) + bd->page_offset; skb = build_skb(buf, rxq->rx_buf_seg_size); if (unlikely(!skb)) return NULL; skb_reserve(skb, pad); skb_put(skb, len); return skb; } static struct sk_buff * qede_tpa_rx_build_skb(struct qede_dev *edev, struct qede_rx_queue *rxq, struct sw_rx_data *bd, u16 len, u16 pad, bool alloc_skb) { struct sk_buff *skb; skb = qede_build_skb(rxq, bd, len, pad); bd->page_offset += rxq->rx_buf_seg_size; if (bd->page_offset == PAGE_SIZE) { if (unlikely(qede_alloc_rx_buffer(rxq, true))) { DP_NOTICE(edev, "Failed to allocate RX buffer for tpa start\n"); bd->page_offset -= rxq->rx_buf_seg_size; page_ref_inc(bd->data); dev_kfree_skb_any(skb); return NULL; } } else { page_ref_inc(bd->data); qede_reuse_page(rxq, bd); } /* We've consumed the first BD and prepared an SKB */ qede_rx_bd_ring_consume(rxq); return skb; } static struct sk_buff * qede_rx_build_skb(struct qede_dev *edev, struct qede_rx_queue *rxq, struct sw_rx_data *bd, u16 len, u16 pad) { struct sk_buff *skb = NULL; /* For smaller frames still need to allocate skb, memcpy * data and benefit in reusing the page segment instead of * un-mapping it. */ if ((len + pad <= edev->rx_copybreak)) { unsigned int offset = bd->page_offset + pad; skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; skb_reserve(skb, pad); skb_put_data(skb, page_address(bd->data) + offset, len); qede_reuse_page(rxq, bd); goto out; } skb = qede_build_skb(rxq, bd, len, pad); if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { /* Incr page ref count to reuse on allocation failure so * that it doesn't get freed while freeing SKB [as its * already mapped there]. 
*/ page_ref_inc(bd->data); dev_kfree_skb_any(skb); return NULL; } out: /* We've consumed the first BD and prepared an SKB */ qede_rx_bd_ring_consume(rxq); return skb; } static void qede_tpa_start(struct qede_dev *edev, struct qede_rx_queue *rxq, struct eth_fast_path_rx_tpa_start_cqe *cqe) { struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; struct sw_rx_data *sw_rx_data_cons; u16 pad; sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; pad = cqe->placement_offset + rxq->rx_headroom; tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons, le16_to_cpu(cqe->len_on_first_bd), pad, false); tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset; tpa_info->buffer.mapping = sw_rx_data_cons->mapping; if (unlikely(!tpa_info->skb)) { DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); /* Consume from ring but do not produce since * this might be used by FW still, it will be re-used * at TPA end. */ tpa_info->tpa_start_fail = true; qede_rx_bd_ring_consume(rxq); tpa_info->state = QEDE_AGG_STATE_ERROR; goto cons_buf; } tpa_info->frag_id = 0; tpa_info->state = QEDE_AGG_STATE_START; if ((le16_to_cpu(cqe->pars_flags.flags) >> PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); else tpa_info->vlan_tag = 0; qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); /* This is needed in order to enable forwarding support */ qede_set_gro_params(edev, tpa_info->skb, cqe); cons_buf: /* We still need to handle bd_len_list to consume buffers */ if (likely(cqe->bw_ext_bd_len_list[0])) qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, le16_to_cpu(cqe->bw_ext_bd_len_list[0])); if (unlikely(cqe->bw_ext_bd_len_list[1])) { DP_ERR(edev, "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n"); tpa_info->state = QEDE_AGG_STATE_ERROR; } } #ifdef CONFIG_INET static void qede_gro_ip_csum(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); struct tcphdr *th; skb_set_transport_header(skb, sizeof(struct iphdr)); th = tcp_hdr(skb); th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), iph->saddr, iph->daddr, 0); tcp_gro_complete(skb); } static void qede_gro_ipv6_csum(struct sk_buff *skb) { struct ipv6hdr *iph = ipv6_hdr(skb); struct tcphdr *th; skb_set_transport_header(skb, sizeof(struct ipv6hdr)); th = tcp_hdr(skb); th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), &iph->saddr, &iph->daddr, 0); tcp_gro_complete(skb); } #endif static void qede_gro_receive(struct qede_dev *edev, struct qede_fastpath *fp, struct sk_buff *skb, u16 vlan_tag) { /* FW can send a single MTU sized packet from gro flow * due to aggregation timeout/last segment etc. which * is not expected to be a gro packet. If a skb has zero * frags then simply push it in the stack as non gso skb. 
*/ if (unlikely(!skb->data_len)) { skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_size = 0; goto send_skb; } #ifdef CONFIG_INET if (skb_shinfo(skb)->gso_size) { skb_reset_network_header(skb); switch (skb->protocol) { case htons(ETH_P_IP): qede_gro_ip_csum(skb); break; case htons(ETH_P_IPV6): qede_gro_ipv6_csum(skb); break; default: DP_ERR(edev, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", ntohs(skb->protocol)); } } #endif send_skb: skb_record_rx_queue(skb, fp->rxq->rxq_id); qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); } static inline void qede_tpa_cont(struct qede_dev *edev, struct qede_rx_queue *rxq, struct eth_fast_path_rx_tpa_cont_cqe *cqe) { int i; for (i = 0; cqe->len_list[i]; i++) qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, le16_to_cpu(cqe->len_list[i])); if (unlikely(i > 1)) DP_ERR(edev, "Strange - TPA cont with more than a single len_list entry\n"); } static int qede_tpa_end(struct qede_dev *edev, struct qede_fastpath *fp, struct eth_fast_path_rx_tpa_end_cqe *cqe) { struct qede_rx_queue *rxq = fp->rxq; struct qede_agg_info *tpa_info; struct sk_buff *skb; int i; tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; skb = tpa_info->skb; if (tpa_info->buffer.page_offset == PAGE_SIZE) dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, PAGE_SIZE, rxq->data_direction); for (i = 0; cqe->len_list[i]; i++) qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, le16_to_cpu(cqe->len_list[i])); if (unlikely(i > 1)) DP_ERR(edev, "Strange - TPA emd with more than a single len_list entry\n"); if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) goto err; /* Sanity */ if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) DP_ERR(edev, "Strange - TPA had %02x BDs, but SKB has only %d frags\n", cqe->num_of_bds, tpa_info->frag_id); if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) DP_ERR(edev, "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", le16_to_cpu(cqe->total_packet_len), skb->len); /* Finalize the SKB */ skb->protocol = eth_type_trans(skb, edev->ndev); skb->ip_summed = CHECKSUM_UNNECESSARY; /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count * to skb_shinfo(skb)->gso_segs */ NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); tpa_info->state = QEDE_AGG_STATE_NONE; return 1; err: tpa_info->state = QEDE_AGG_STATE_NONE; if (tpa_info->tpa_start_fail) { qede_reuse_page(rxq, &tpa_info->buffer); tpa_info->tpa_start_fail = false; } dev_kfree_skb_any(tpa_info->skb); tpa_info->skb = NULL; return 0; } static u8 qede_check_notunn_csum(u16 flag) { u16 csum_flag = 0; u8 csum = 0; if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; csum = QEDE_CSUM_UNNECESSARY; } csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; if (csum_flag & flag) return QEDE_CSUM_ERROR; return csum; } static u8 qede_check_csum(u16 flag) { if (!qede_tunn_exist(flag)) return qede_check_notunn_csum(flag); else return qede_check_tunn_csum(flag); } static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, u16 flag) { u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK << ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) || (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK << PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT))) return true; return false; } /* Return 
true iff packet is to be passed to stack */ static bool qede_rx_xdp(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_rx_queue *rxq, struct bpf_prog *prog, struct sw_rx_data *bd, struct eth_fast_path_rx_reg_cqe *cqe, u16 *data_offset, u16 *len) { struct xdp_buff xdp; enum xdp_action act; xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq); xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset, *len, false); act = bpf_prog_run_xdp(prog, &xdp); /* Recalculate, as XDP might have changed the headers */ *data_offset = xdp.data - xdp.data_hard_start; *len = xdp.data_end - xdp.data; if (act == XDP_PASS) return true; /* Count number of packets not to be passed to stack */ rxq->xdp_no_pass++; switch (act) { case XDP_TX: /* We need the replacement buffer before transmit. */ if (unlikely(qede_alloc_rx_buffer(rxq, true))) { qede_recycle_rx_bd_ring(rxq, 1); trace_xdp_exception(edev->ndev, prog, act); break; } /* Now if there's a transmission problem, we'd still have to * throw current buffer, as replacement was already allocated. */ if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping, *data_offset, *len, bd->data, NULL))) { dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, rxq->data_direction); __free_page(bd->data); trace_xdp_exception(edev->ndev, prog, act); } else { dma_sync_single_for_device(rxq->dev, bd->mapping + *data_offset, *len, rxq->data_direction); fp->xdp_xmit |= QEDE_XDP_TX; } /* Regardless, we've consumed an Rx BD */ qede_rx_bd_ring_consume(rxq); break; case XDP_REDIRECT: /* We need the replacement buffer before transmit. */ if (unlikely(qede_alloc_rx_buffer(rxq, true))) { qede_recycle_rx_bd_ring(rxq, 1); trace_xdp_exception(edev->ndev, prog, act); break; } dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, rxq->data_direction); if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog))) DP_NOTICE(edev, "Failed to redirect the packet\n"); else fp->xdp_xmit |= QEDE_XDP_REDIRECT; qede_rx_bd_ring_consume(rxq); break; default: bpf_warn_invalid_xdp_action(edev->ndev, prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(edev->ndev, prog, act); fallthrough; case XDP_DROP: qede_recycle_rx_bd_ring(rxq, cqe->bd_num); } return false; } static int qede_rx_build_jumbo(struct qede_dev *edev, struct qede_rx_queue *rxq, struct sk_buff *skb, struct eth_fast_path_rx_reg_cqe *cqe, u16 first_bd_len) { u16 pkt_len = le16_to_cpu(cqe->pkt_len); struct sw_rx_data *bd; u16 bd_cons_idx; u8 num_frags; pkt_len -= first_bd_len; /* We've already used one BD for the SKB. Now take care of the rest */ for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : pkt_len; if (unlikely(!cur_size)) { DP_ERR(edev, "Still got %d BDs for mapping jumbo, but length became 0\n", num_frags); goto out; } /* We need a replacement buffer for each BD */ if (unlikely(qede_alloc_rx_buffer(rxq, true))) goto out; /* Now that we've allocated the replacement buffer, * we can safely consume the next BD and map it to the SKB. 
*/ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; bd = &rxq->sw_rx_ring[bd_cons_idx]; qede_rx_bd_ring_consume(rxq); dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, DMA_FROM_DEVICE); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data, rxq->rx_headroom, cur_size, PAGE_SIZE); pkt_len -= cur_size; } if (unlikely(pkt_len)) DP_ERR(edev, "Mapped all BDs of jumbo, but still have %d bytes\n", pkt_len); out: return num_frags; } static int qede_rx_process_tpa_cqe(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_rx_queue *rxq, union eth_rx_cqe *cqe, enum eth_rx_cqe_type type) { switch (type) { case ETH_RX_CQE_TYPE_TPA_START: qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); return 0; case ETH_RX_CQE_TYPE_TPA_CONT: qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); return 0; case ETH_RX_CQE_TYPE_TPA_END: return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); default: return 0; } } static int qede_rx_process_cqe(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_rx_queue *rxq) { struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); struct eth_fast_path_rx_reg_cqe *fp_cqe; u16 len, pad, bd_cons_idx, parse_flag; enum eth_rx_cqe_type cqe_type; union eth_rx_cqe *cqe; struct sw_rx_data *bd; struct sk_buff *skb; __le16 flags; u8 csum_flag; /* Get the CQE from the completion ring */ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); cqe_type = cqe->fast_path_regular.type; /* Process an unlikely slowpath event */ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { struct eth_slow_path_rx_cqe *sp_cqe; sp_cqe = (struct eth_slow_path_rx_cqe *)cqe; edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); return 0; } /* Handle TPA cqes */ if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type); /* Get the data from the SW ring; Consume it only after it's evident * we wouldn't recycle it. */ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; bd = &rxq->sw_rx_ring[bd_cons_idx]; fp_cqe = &cqe->fast_path_regular; len = le16_to_cpu(fp_cqe->len_on_first_bd); pad = fp_cqe->placement_offset + rxq->rx_headroom; /* Run eBPF program if one is attached */ if (xdp_prog) if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe, &pad, &len)) return 0; /* If this is an error packet then drop it */ flags = cqe->fast_path_regular.pars_flags.flags; parse_flag = le16_to_cpu(flags); csum_flag = qede_check_csum(parse_flag); if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) rxq->rx_ip_frags++; else rxq->rx_hw_errors++; } /* Basic validation passed; Need to prepare an SKB. This would also * guarantee to finally consume the first BD upon success. */ skb = qede_rx_build_skb(edev, rxq, bd, len, pad); if (!skb) { rxq->rx_alloc_errors++; qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); return 0; } /* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed * by a single cqe. */ if (fp_cqe->bd_num > 1) { u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb, fp_cqe, len); if (unlikely(unmapped_frags > 0)) { qede_recycle_rx_bd_ring(rxq, unmapped_frags); dev_kfree_skb_any(skb); return 0; } } /* The SKB contains all the data. 
Now prepare meta-magic */ skb->protocol = eth_type_trans(skb, edev->ndev); qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); qede_set_skb_csum(skb, csum_flag); skb_record_rx_queue(skb, rxq->rxq_id); qede_ptp_record_rx_ts(edev, cqe, skb); /* SKB is prepared - pass it to stack */ qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); return 1; } static int qede_rx_int(struct qede_fastpath *fp, int budget) { struct qede_rx_queue *rxq = fp->rxq; struct qede_dev *edev = fp->edev; int work_done = 0, rcv_pkts = 0; u16 hw_comp_cons, sw_comp_cons; hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); /* Memory barrier to prevent the CPU from doing speculative reads of CQE * / BD in the while-loop before reading hw_comp_cons. If the CQE is * read before it is written by FW, then FW writes CQE and SB, and then * the CPU reads the hw_comp_cons, it will use an old CQE. */ rmb(); /* Loop to complete all indicated BDs */ while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) { rcv_pkts += qede_rx_process_cqe(edev, fp, rxq); qed_chain_recycle_consumed(&rxq->rx_comp_ring); sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); work_done++; } rxq->rcv_pkts += rcv_pkts; /* Allocate replacement buffers */ while (rxq->num_rx_buffers - rxq->filled_buffers) if (qede_alloc_rx_buffer(rxq, false)) break; /* Update producers */ qede_update_rx_prod(edev, rxq); return work_done; } static bool qede_poll_is_more_work(struct qede_fastpath *fp) { qed_sb_update_sb_idx(fp->sb_info); /* *_has_*_work() reads the status block, thus we need to ensure that * status block indices have been actually read (qed_sb_update_sb_idx) * prior to this check (*_has_*_work) so that we won't write the * "newer" value of the status block to HW (if there was a DMA right * after qede_has_rx_work and if there is no rmb, the memory reading * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb). * In this case there will never be another interrupt until there is * another update of the status block, while there is still unhandled * work. */ rmb(); if (likely(fp->type & QEDE_FASTPATH_RX)) if (qede_has_rx_work(fp->rxq)) return true; if (fp->type & QEDE_FASTPATH_XDP) if (qede_txq_has_work(fp->xdp_tx)) return true; if (likely(fp->type & QEDE_FASTPATH_TX)) { int cos; for_each_cos_in_txq(fp->edev, cos) { if (qede_txq_has_work(&fp->txq[cos])) return true; } } return false; } /********************* * NDO & API related * *********************/ int qede_poll(struct napi_struct *napi, int budget) { struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, napi); struct qede_dev *edev = fp->edev; int rx_work_done = 0; u16 xdp_prod; fp->xdp_xmit = 0; if (likely(fp->type & QEDE_FASTPATH_TX)) { int cos; for_each_cos_in_txq(fp->edev, cos) { if (qede_txq_has_work(&fp->txq[cos])) qede_tx_int(edev, &fp->txq[cos]); } } if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) qede_xdp_tx_int(edev, fp->xdp_tx); rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && qede_has_rx_work(fp->rxq)) ? 
qede_rx_int(fp, budget) : 0; if (fp->xdp_xmit & QEDE_XDP_REDIRECT) xdp_do_flush(); /* Handle case where we are called by netpoll with a budget of 0 */ if (rx_work_done < budget || !budget) { if (!qede_poll_is_more_work(fp)) { napi_complete_done(napi, rx_work_done); /* Update and reenable interrupts */ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); } else { rx_work_done = budget; } } if (fp->xdp_xmit & QEDE_XDP_TX) { xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); qede_update_tx_producer(fp->xdp_tx); } return rx_work_done; } irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) { struct qede_fastpath *fp = fp_cookie; qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); napi_schedule_irqoff(&fp->napi); return IRQ_HANDLED; } /* Main transmit function */ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); struct netdev_queue *netdev_txq; struct qede_tx_queue *txq; struct eth_tx_1st_bd *first_bd; struct eth_tx_2nd_bd *second_bd = NULL; struct eth_tx_3rd_bd *third_bd = NULL; struct eth_tx_bd *tx_data_bd = NULL; u16 txq_index, val = 0; u8 nbd = 0; dma_addr_t mapping; int rc, frag_idx = 0, ipv6_ext = 0; u8 xmit_type; u16 idx; u16 hlen; bool data_split = false; /* Get tx-queue context and netdev index */ txq_index = skb_get_queue_mapping(skb); WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc); txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index); netdev_txq = netdev_get_tx_queue(ndev, txq_index); WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); xmit_type = qede_xmit_type(skb, &ipv6_ext); #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) if (qede_pkt_req_lin(skb, xmit_type)) { if (skb_linearize(skb)) { txq->tx_mem_alloc_err++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } } #endif /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = txq->sw_tx_prod; txq->sw_tx_ring.skbs[idx].skb = skb; first_bd = (struct eth_tx_1st_bd *) qed_chain_produce(&txq->tx_pbl); memset(first_bd, 0, sizeof(*first_bd)); first_bd->data.bd_flags.bitfields = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) qede_ptp_tx_ts(edev, skb); /* Map skb linear data for DMA and set in the first BD */ mapping = dma_map_single(txq->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(txq->dev, mapping))) { DP_NOTICE(edev, "SKB mapping failed\n"); qede_free_failed_tx_pkt(txq, first_bd, 0, false); qede_update_tx_producer(txq); return NETDEV_TX_OK; } nbd++; BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); /* In case there is IPv6 with extension headers or LSO we need 2nd and * 3rd BDs. */ if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) { second_bd = (struct eth_tx_2nd_bd *) qed_chain_produce(&txq->tx_pbl); memset(second_bd, 0, sizeof(*second_bd)); nbd++; third_bd = (struct eth_tx_3rd_bd *) qed_chain_produce(&txq->tx_pbl); memset(third_bd, 0, sizeof(*third_bd)); nbd++; /* We need to fill in additional data in second_bd... 
*/ tx_data_bd = (struct eth_tx_bd *)second_bd; } if (skb_vlan_tag_present(skb)) { first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; } /* Fill the parsing flags & params according to the requested offload */ if (xmit_type & XMIT_L4_CSUM) { /* We don't re-calculate IP checksum as it is already done by * the upper stack */ first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; if (xmit_type & XMIT_ENC) { first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT); } /* Legacy FW had flipped behavior in regard to this bit - * I.e., needed to set to prevent FW from touching encapsulated * packets when it didn't need to. */ if (unlikely(txq->is_legacy)) val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT); /* If the packet is IPv6 with extension header, indicate that * to FW and pass few params, since the device cracker doesn't * support parsing IPv6 with extension header/s. */ if (unlikely(ipv6_ext)) qede_set_params_for_ipv6_ext(skb, second_bd, third_bd); } if (xmit_type & XMIT_LSO) { first_bd->data.bd_flags.bitfields |= (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); third_bd->data.lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); if (unlikely(xmit_type & XMIT_ENC)) { first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; if (xmit_type & XMIT_ENC_GSO_L4_CSUM) { u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT; first_bd->data.bd_flags.bitfields |= 1 << tmp; } hlen = qede_get_skb_hlen(skb, true); } else { first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; hlen = qede_get_skb_hlen(skb, false); } /* @@@TBD - if will not be removed need to check */ third_bd->data.bitfields |= cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); /* Make life easier for FW guys who can't deal with header and * data on same BD. If we need to split, use the second bd... */ if (unlikely(skb_headlen(skb) > hlen)) { DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n", first_bd->nbytes, first_bd->addr.hi, first_bd->addr.lo); mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), le32_to_cpu(first_bd->addr.lo)) + hlen; BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, le16_to_cpu(first_bd->nbytes) - hlen); /* this marks the BD as one that has no * individual mapping */ txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; first_bd->nbytes = cpu_to_le16(hlen); tx_data_bd = (struct eth_tx_bd *)third_bd; data_split = true; } } else { if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) { DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len); qede_free_failed_tx_pkt(txq, first_bd, 0, false); qede_update_tx_producer(txq); return NETDEV_TX_OK; } val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT); } first_bd->data.bitfields = cpu_to_le16(val); /* Handle fragmented skb */ /* special handle for frags inside 2nd and 3rd bds.. */ while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { rc = map_frag_to_bd(txq, &skb_shinfo(skb)->frags[frag_idx], tx_data_bd); if (rc) { qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); qede_update_tx_producer(txq); return NETDEV_TX_OK; } if (tx_data_bd == (struct eth_tx_bd *)second_bd) tx_data_bd = (struct eth_tx_bd *)third_bd; else tx_data_bd = NULL; frag_idx++; } /* map last frags into 4th, 5th .... 
*/ for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { tx_data_bd = (struct eth_tx_bd *) qed_chain_produce(&txq->tx_pbl); memset(tx_data_bd, 0, sizeof(*tx_data_bd)); rc = map_frag_to_bd(txq, &skb_shinfo(skb)->frags[frag_idx], tx_data_bd); if (rc) { qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split); qede_update_tx_producer(txq); return NETDEV_TX_OK; } } /* update the first BD with the actual num BDs */ first_bd->data.nbds = nbd; netdev_tx_sent_queue(netdev_txq, skb->len); skb_tx_timestamp(skb); /* Advance packet producer only before sending the packet since mapping * of pages may fail. */ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; /* 'next page' entries are counted in the producer value */ txq->tx_db.data.bd_prod = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq)) qede_update_tx_producer(txq); if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1))) { if (netdev_xmit_more()) qede_update_tx_producer(txq); netif_tx_stop_queue(netdev_txq); txq->stopped_cnt++; DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, "Stop queue was called\n"); /* paired memory barrier is in qede_tx_int(), we have to keep * ordering of set_bit() in netif_tx_stop_queue() and read of * fp->bd_tx_cons */ smp_mb(); if ((qed_chain_get_elem_left(&txq->tx_pbl) >= (MAX_SKB_FRAGS + 1)) && (edev->state == QEDE_STATE_OPEN)) { netif_tx_wake_queue(netdev_txq); DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, "Wake queue was called\n"); } } return NETDEV_TX_OK; } u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct qede_dev *edev = netdev_priv(dev); int total_txq; total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; return QEDE_TSS_COUNT(edev) ? netdev_pick_tx(dev, skb, NULL) % total_txq : 0; } /* 8B udp header + 8B base tunnel header + 32B option length */ #define QEDE_MAX_TUN_HDR_LEN 48 netdev_features_t qede_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { if (skb->encapsulation) { u8 l4_proto = 0; switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): l4_proto = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): l4_proto = ipv6_hdr(skb)->nexthdr; break; default: return features; } /* Disable offloads for geneve tunnels, as HW can't parse * the geneve header which has option length greater than 32b * and disable offloads for the ports which are not offloaded. */ if (l4_proto == IPPROTO_UDP) { struct qede_dev *edev = netdev_priv(dev); u16 hdrlen, vxln_port, gnv_port; hdrlen = QEDE_MAX_TUN_HDR_LEN; vxln_port = edev->vxlan_dst_port; gnv_port = edev->geneve_dst_port; if ((skb_inner_mac_header(skb) - skb_transport_header(skb)) > hdrlen || (ntohs(udp_hdr(skb)->dest) != vxln_port && ntohs(udp_hdr(skb)->dest) != gnv_port)) return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } else if (l4_proto == IPPROTO_IPIP) { /* IPIP tunnels are unknown to the device or at least unsupported natively, * offloads for them can't be done trivially, so disable them for such skb. */ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } } return features; }
linux-master
drivers/net/ethernet/qlogic/qede/qede_fp.c
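The fast path above decides per packet whether to copy small frames into a freshly allocated skb (so the mapped page segment can be reused) or to hand the page itself to the stack via build_skb(); this is the "(len + pad <= edev->rx_copybreak)" test in qede_rx_build_skb(). The standalone sketch below only illustrates that threshold decision in userspace arithmetic; the 256-byte default and the helper names are assumptions for the example, not definitions taken from the driver.

/* Userspace sketch of the rx_copybreak decision made in qede_rx_build_skb():
 * frames no larger than the copybreak threshold are copied into a newly
 * allocated skb so the original page segment can be reused, while larger
 * frames get build_skb() on the page itself. RX_COPYBREAK and
 * rx_should_copy() are illustrative names, not the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

#define RX_COPYBREAK 256	/* assumed threshold; tunable in the real driver */

static bool rx_should_copy(unsigned int len, unsigned int pad)
{
	/* Mirrors the "(len + pad <= edev->rx_copybreak)" test. */
	return len + pad <= RX_COPYBREAK;
}

int main(void)
{
	unsigned int lens[] = { 64, 200, 512, 1500 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len=%4u pad=2 -> %s\n", lens[i],
		       rx_should_copy(lens[i], 2) ?
		       "copy into new skb" : "build_skb on the page");
	return 0;
}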
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include "qede_ptp.h" #define QEDE_PTP_TX_TIMEOUT (2 * HZ) struct qede_ptp { const struct qed_eth_ptp_ops *ops; struct ptp_clock_info clock_info; struct cyclecounter cc; struct timecounter tc; struct ptp_clock *clock; struct work_struct work; unsigned long ptp_tx_start; struct qede_dev *edev; struct sk_buff *tx_skb; /* ptp spinlock is used for protecting the cycle/time counter fields * and, also for serializing the qed PTP API invocations. */ spinlock_t lock; bool hw_ts_ioctl_called; u16 tx_type; u16 rx_filter; }; /** * qede_ptp_adjfine() - Adjust the frequency of the PTP cycle counter. * * @info: The PTP clock info structure. * @scaled_ppm: Scaled parts per million adjustment from base. * * Scaled parts per million is ppm with a 16-bit binary fractional field. * * Return: Zero on success, negative errno otherwise. */ static int qede_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) { struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info); s32 ppb = scaled_ppm_to_ppb(scaled_ppm); struct qede_dev *edev = ptp->edev; int rc; __qede_lock(edev); if (edev->state == QEDE_STATE_OPEN) { spin_lock_bh(&ptp->lock); rc = ptp->ops->adjfreq(edev->cdev, ppb); spin_unlock_bh(&ptp->lock); } else { DP_ERR(edev, "PTP adjfine called while interface is down\n"); rc = -EFAULT; } __qede_unlock(edev); return rc; } static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta) { struct qede_dev *edev; struct qede_ptp *ptp; ptp = container_of(info, struct qede_ptp, clock_info); edev = ptp->edev; DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n", delta); spin_lock_bh(&ptp->lock); timecounter_adjtime(&ptp->tc, delta); spin_unlock_bh(&ptp->lock); return 0; } static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts) { struct qede_dev *edev; struct qede_ptp *ptp; u64 ns; ptp = container_of(info, struct qede_ptp, clock_info); edev = ptp->edev; spin_lock_bh(&ptp->lock); ns = timecounter_read(&ptp->tc); spin_unlock_bh(&ptp->lock); DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns); *ts = ns_to_timespec64(ns); return 0; } static int qede_ptp_settime(struct ptp_clock_info *info, const struct timespec64 *ts) { struct qede_dev *edev; struct qede_ptp *ptp; u64 ns; ptp = container_of(info, struct qede_ptp, clock_info); edev = ptp->edev; ns = timespec64_to_ns(ts); DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns); /* Re-init the timecounter */ spin_lock_bh(&ptp->lock); timecounter_init(&ptp->tc, &ptp->cc, ns); spin_unlock_bh(&ptp->lock); return 0; } /* Enable (or disable) ancillary features of the phc subsystem */ static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info, struct ptp_clock_request *rq, int on) { struct qede_dev *edev; struct qede_ptp *ptp; ptp = container_of(info, struct qede_ptp, clock_info); edev = ptp->edev; DP_ERR(edev, "PHC ancillary features are not supported\n"); return -ENOTSUPP; } static void qede_ptp_task(struct work_struct *work) { struct skb_shared_hwtstamps shhwtstamps; struct qede_dev *edev; struct qede_ptp *ptp; u64 timestamp, ns; bool timedout; int rc; ptp = container_of(work, struct qede_ptp, work); edev = ptp->edev; timedout = time_is_before_jiffies(ptp->ptp_tx_start + QEDE_PTP_TX_TIMEOUT); /* Read Tx timestamp registers */ spin_lock_bh(&ptp->lock); rc = ptp->ops->read_tx_ts(edev->cdev, 
&timestamp); spin_unlock_bh(&ptp->lock); if (rc) { if (unlikely(timedout)) { DP_INFO(edev, "Tx timestamp is not recorded\n"); dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); edev->ptp_skip_txts++; } else { /* Reschedule to keep checking for a valid TS value */ schedule_work(&ptp->work); } return; } ns = timecounter_cyc2time(&ptp->tc, timestamp); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(ptp->tx_skb, &shhwtstamps); dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); DP_VERBOSE(edev, QED_MSG_DEBUG, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", timestamp, ns); } /* Read the PHC. This API is invoked with ptp_lock held. */ static u64 qede_ptp_read_cc(const struct cyclecounter *cc) { struct qede_dev *edev; struct qede_ptp *ptp; u64 phc_cycles; int rc; ptp = container_of(cc, struct qede_ptp, cc); edev = ptp->edev; rc = ptp->ops->read_cc(edev->cdev, &phc_cycles); if (rc) WARN_ONCE(1, "PHC read err %d\n", rc); DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles); return phc_cycles; } static int qede_ptp_cfg_filters(struct qede_dev *edev) { enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON; enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE; struct qede_ptp *ptp = edev->ptp; if (!ptp) return -EIO; if (!ptp->hw_ts_ioctl_called) { DP_INFO(edev, "TS IOCTL not called\n"); return 0; } switch (ptp->tx_type) { case HWTSTAMP_TX_ON: set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags); tx_type = QED_PTP_HWTSTAMP_TX_ON; break; case HWTSTAMP_TX_OFF: clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags); tx_type = QED_PTP_HWTSTAMP_TX_OFF; break; case HWTSTAMP_TX_ONESTEP_SYNC: case HWTSTAMP_TX_ONESTEP_P2P: DP_ERR(edev, "One-step timestamping is not supported\n"); return -ERANGE; } spin_lock_bh(&ptp->lock); switch (ptp->rx_filter) { case HWTSTAMP_FILTER_NONE: rx_filter = QED_PTP_FILTER_NONE; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_NTP_ALL: ptp->rx_filter = HWTSTAMP_FILTER_NONE; rx_filter = QED_PTP_FILTER_ALL; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; rx_filter = QED_PTP_FILTER_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* Initialize PTP detection for UDP/IPv4 events */ rx_filter = QED_PTP_FILTER_V1_L4_GEN; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; rx_filter = QED_PTP_FILTER_V2_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ rx_filter = QED_PTP_FILTER_V2_L4_GEN; break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; rx_filter = QED_PTP_FILTER_V2_L2_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; /* Initialize PTP detection L2 events */ rx_filter = QED_PTP_FILTER_V2_L2_GEN; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; rx_filter = QED_PTP_FILTER_V2_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* Initialize PTP 
detection L2, UDP/IPv4 or UDP/IPv6 events */ rx_filter = QED_PTP_FILTER_V2_GEN; break; } ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type); spin_unlock_bh(&ptp->lock); return 0; } int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) { struct hwtstamp_config config; struct qede_ptp *ptp; int rc; ptp = edev->ptp; if (!ptp) return -EIO; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; DP_VERBOSE(edev, QED_MSG_DEBUG, "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n", config.tx_type, config.rx_filter); ptp->hw_ts_ioctl_called = 1; ptp->tx_type = config.tx_type; ptp->rx_filter = config.rx_filter; rc = qede_ptp_cfg_filters(edev); if (rc) return rc; config.rx_filter = ptp->rx_filter; return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; if (!ptp) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; info->phc_index = -1; return 0; } info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; if (ptp->clock) info->phc_index = ptp_clock_index(ptp->clock); else info->phc_index = -1; info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); return 0; } void qede_ptp_disable(struct qede_dev *edev) { struct qede_ptp *ptp; ptp = edev->ptp; if (!ptp) return; if (ptp->clock) { ptp_clock_unregister(ptp->clock); ptp->clock = NULL; } /* Cancel PTP work queue. Should be done after the Tx queues are * drained to prevent additional scheduling. 
*/ cancel_work_sync(&ptp->work); if (ptp->tx_skb) { dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); } /* Disable PTP in HW */ spin_lock_bh(&ptp->lock); ptp->ops->disable(edev->cdev); spin_unlock_bh(&ptp->lock); kfree(ptp); edev->ptp = NULL; } static int qede_ptp_init(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; ptp = edev->ptp; if (!ptp) return -EINVAL; spin_lock_init(&ptp->lock); /* Configure PTP in HW */ rc = ptp->ops->enable(edev->cdev); if (rc) { DP_INFO(edev, "PTP HW enable failed\n"); return rc; } /* Init work queue for Tx timestamping */ INIT_WORK(&ptp->work, qede_ptp_task); /* Init cyclecounter and timecounter */ memset(&ptp->cc, 0, sizeof(ptp->cc)); ptp->cc.read = qede_ptp_read_cc; ptp->cc.mask = CYCLECOUNTER_MASK(64); ptp->cc.shift = 0; ptp->cc.mult = 1; timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real())); return 0; } int qede_ptp_enable(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); if (!ptp) { DP_INFO(edev, "Failed to allocate struct for PTP\n"); return -ENOMEM; } ptp->edev = edev; ptp->ops = edev->ops->ptp; if (!ptp->ops) { DP_INFO(edev, "PTP enable failed\n"); rc = -EIO; goto err1; } edev->ptp = ptp; rc = qede_ptp_init(edev); if (rc) goto err1; qede_ptp_cfg_filters(edev); /* Fill the ptp_clock_info struct and register PTP clock */ ptp->clock_info.owner = THIS_MODULE; snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name); ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB; ptp->clock_info.n_alarm = 0; ptp->clock_info.n_ext_ts = 0; ptp->clock_info.n_per_out = 0; ptp->clock_info.pps = 0; ptp->clock_info.adjfine = qede_ptp_adjfine; ptp->clock_info.adjtime = qede_ptp_adjtime; ptp->clock_info.gettime64 = qede_ptp_gettime; ptp->clock_info.settime64 = qede_ptp_settime; ptp->clock_info.enable = qede_ptp_ancillary_feature_enable; ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); if (IS_ERR(ptp->clock)) { DP_ERR(edev, "PTP clock registration failed\n"); qede_ptp_disable(edev); rc = -EINVAL; goto err2; } return 0; err1: kfree(ptp); err2: edev->ptp = NULL; return rc; } void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) { struct qede_ptp *ptp; ptp = edev->ptp; if (!ptp) return; if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags)) { DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n"); edev->ptp_skip_txts++; return; } if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) { DP_VERBOSE(edev, QED_MSG_DEBUG, "Tx timestamping was not enabled, this pkt will not be timestamped\n"); clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); edev->ptp_skip_txts++; } else if (unlikely(ptp->tx_skb)) { DP_VERBOSE(edev, QED_MSG_DEBUG, "Device supports a single outstanding pkt to ts, It will not be ts\n"); clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); edev->ptp_skip_txts++; } else { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; /* schedule check for Tx timestamp */ ptp->tx_skb = skb_get(skb); ptp->ptp_tx_start = jiffies; schedule_work(&ptp->work); } } void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb) { struct qede_ptp *ptp; u64 timestamp, ns; int rc; ptp = edev->ptp; if (!ptp) return; spin_lock_bh(&ptp->lock); rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp); if (rc) { spin_unlock_bh(&ptp->lock); DP_INFO(edev, "Invalid Rx timestamp\n"); return; } ns = timecounter_cyc2time(&ptp->tc, timestamp); spin_unlock_bh(&ptp->lock); skb_hwtstamps(skb)->hwtstamp = 
ns_to_ktime(ns); DP_VERBOSE(edev, QED_MSG_DEBUG, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", timestamp, ns); }
linux-master
drivers/net/ethernet/qlogic/qede/qede_ptp.c
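qede_ptp_adjfine() above converts its scaled_ppm argument to parts per billion before calling the qed adjfreq op; as its kernel-doc comment notes, scaled ppm is ppm with a 16-bit binary fractional field. The sketch below demonstrates that conversion in plain userspace arithmetic; it is an illustration of the math, not the kernel's scaled_ppm_to_ppb() implementation, and the sample values are made up.

/* Userspace sketch of the scaled_ppm -> ppb conversion relied on by
 * qede_ptp_adjfine(). scaled_ppm carries a 16-bit binary fraction, so
 * 65536 scaled_ppm == 1 ppm == 1000 ppb. sketch_scaled_ppm_to_ppb() is an
 * illustrative stand-in for the kernel helper, not its exact code.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t sketch_scaled_ppm_to_ppb(long scaled_ppm)
{
	/* ppb = scaled_ppm * 1000 / 2^16, widened to 64 bits to avoid overflow */
	return ((int64_t)scaled_ppm * 1000) / 65536;
}

int main(void)
{
	long samples[] = { 65536, -65536, 32768, 1 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("scaled_ppm=%7ld -> %lld ppb\n", samples[i],
		       (long long)sketch_scaled_ppm_to_ppb(samples[i]));
	return 0;
}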
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/crash_dump.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/string.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <asm/byteorder.h> #include <asm/param.h> #include <linux/io.h> #include <linux/netdev_features.h> #include <linux/udp.h> #include <linux/tcp.h> #include <net/udp_tunnel.h> #include <linux/ip.h> #include <net/ipv6.h> #include <net/tcp.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/pkt_sched.h> #include <linux/ethtool.h> #include <linux/in.h> #include <linux/random.h> #include <net/ip6_checksum.h> #include <linux/bitops.h> #include <linux/vmalloc.h> #include "qede.h" #include "qede_ptp.h" MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver"); MODULE_LICENSE("GPL"); static uint debug; module_param(debug, uint, 0); MODULE_PARM_DESC(debug, " Default debug msglevel"); static const struct qed_eth_ops *qed_ops; #define CHIP_NUM_57980S_40 0x1634 #define CHIP_NUM_57980S_10 0x1666 #define CHIP_NUM_57980S_MF 0x1636 #define CHIP_NUM_57980S_100 0x1644 #define CHIP_NUM_57980S_50 0x1654 #define CHIP_NUM_57980S_25 0x1656 #define CHIP_NUM_57980S_IOV 0x1664 #define CHIP_NUM_AH 0x8070 #define CHIP_NUM_AH_IOV 0x8090 #ifndef PCI_DEVICE_ID_NX2_57980E #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 #define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10 #define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100 #define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50 #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 #define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV #define PCI_DEVICE_ID_AH CHIP_NUM_AH #define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV #endif enum qede_pci_private { QEDE_PRIVATE_PF, QEDE_PRIVATE_VF }; static const struct pci_device_id qede_pci_tbl[] = { {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF}, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF}, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF}, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, #ifdef CONFIG_QED_SRIOV {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, #endif {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF}, #ifdef CONFIG_QED_SRIOV {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF}, #endif { 0 } }; MODULE_DEVICE_TABLE(pci, qede_pci_tbl); static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); static pci_ers_result_t qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state); #define TX_TIMEOUT (5 * HZ) /* Utilize last protocol index for XDP */ #define XDP_PI 11 static void qede_remove(struct pci_dev *pdev); static void qede_shutdown(struct pci_dev *pdev); static void qede_link_update(void *dev, struct qed_link_output *link); static void qede_schedule_recovery_handler(void *dev); static void qede_recovery_handler(struct qede_dev *edev); static void qede_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type); static void qede_get_eth_tlv_data(void *edev, void *data); static void 
qede_get_generic_tlv_data(void *edev, struct qed_generic_tlvs *data); static void qede_generic_hw_err_handler(struct qede_dev *edev); #ifdef CONFIG_QED_SRIOV static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct qede_dev *edev = netdev_priv(ndev); if (vlan > 4095) { DP_NOTICE(edev, "Illegal vlan value %d\n", vlan); return -EINVAL; } if (vlan_proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n", vlan, vf); return edev->ops->iov->set_vlan(edev->cdev, vlan, vf); } static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac) { struct qede_dev *edev = netdev_priv(ndev); DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx); if (!is_valid_ether_addr(mac)) { DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n"); return -EINVAL; } return edev->ops->iov->set_mac(edev->cdev, mac, vfidx); } static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) { struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); struct qed_dev_info *qed_info = &edev->dev_info.common; struct qed_update_vport_params *vport_params; int rc; vport_params = vzalloc(sizeof(*vport_params)); if (!vport_params) return -ENOMEM; DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param); rc = edev->ops->iov->configure(edev->cdev, num_vfs_param); /* Enable/Disable Tx switching for PF */ if ((rc == num_vfs_param) && netif_running(edev->ndev) && !qed_info->b_inter_pf_switch && qed_info->tx_switching) { vport_params->vport_id = 0; vport_params->update_tx_switching_flg = 1; vport_params->tx_switching_flg = num_vfs_param ? 1 : 0; edev->ops->vport_update(edev->cdev, vport_params); } vfree(vport_params); return rc; } #endif static int __maybe_unused qede_suspend(struct device *dev) { dev_info(dev, "Device does not support suspend operation\n"); return -EOPNOTSUPP; } static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL); static const struct pci_error_handlers qede_err_handler = { .error_detected = qede_io_error_detected, }; static struct pci_driver qede_pci_driver = { .name = "qede", .id_table = qede_pci_tbl, .probe = qede_probe, .remove = qede_remove, .shutdown = qede_shutdown, #ifdef CONFIG_QED_SRIOV .sriov_configure = qede_sriov_configure, #endif .err_handler = &qede_err_handler, .driver.pm = &qede_pm_ops, }; static struct qed_eth_cb_ops qede_ll_ops = { { #ifdef CONFIG_RFS_ACCEL .arfs_filter_op = qede_arfs_filter_op, #endif .link_update = qede_link_update, .schedule_recovery_handler = qede_schedule_recovery_handler, .schedule_hw_err_handler = qede_schedule_hw_err_handler, .get_generic_tlv_data = qede_get_generic_tlv_data, .get_protocol_tlv_data = qede_get_eth_tlv_data, }, .force_mac = qede_force_mac, .ports_update = qede_udp_ports_update, }; static int qede_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct ethtool_drvinfo drvinfo; struct qede_dev *edev; if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR) goto done; /* Check whether this is a qede device */ if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo) goto done; memset(&drvinfo, 0, sizeof(drvinfo)); ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo); if (strcmp(drvinfo.driver, "qede")) goto done; edev = netdev_priv(ndev); switch (event) { case NETDEV_CHANGENAME: /* Notify qed of the name change */ if (!edev->ops || !edev->ops->common) goto done; edev->ops->common->set_name(edev->cdev, 
edev->ndev->name); break; case NETDEV_CHANGEADDR: edev = netdev_priv(ndev); qede_rdma_event_changeaddr(edev); break; } done: return NOTIFY_DONE; } static struct notifier_block qede_netdev_notifier = { .notifier_call = qede_netdev_event, }; static int __init qede_init(void) { int ret; pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n"); qede_forced_speed_maps_init(); qed_ops = qed_get_eth_ops(); if (!qed_ops) { pr_notice("Failed to get qed ethtool operations\n"); return -EINVAL; } /* Must register notifier before pci ops, since we might miss * interface rename after pci probe and netdev registration. */ ret = register_netdevice_notifier(&qede_netdev_notifier); if (ret) { pr_notice("Failed to register netdevice_notifier\n"); qed_put_eth_ops(); return -EINVAL; } ret = pci_register_driver(&qede_pci_driver); if (ret) { pr_notice("Failed to register driver\n"); unregister_netdevice_notifier(&qede_netdev_notifier); qed_put_eth_ops(); return -EINVAL; } return 0; } static void __exit qede_cleanup(void) { if (debug & QED_LOG_INFO_MASK) pr_info("qede_cleanup called\n"); unregister_netdevice_notifier(&qede_netdev_notifier); pci_unregister_driver(&qede_pci_driver); qed_put_eth_ops(); } module_init(qede_init); module_exit(qede_cleanup); static int qede_open(struct net_device *ndev); static int qede_close(struct net_device *ndev); void qede_fill_by_demand_stats(struct qede_dev *edev) { struct qede_stats_common *p_common = &edev->stats.common; struct qed_eth_stats stats; edev->ops->get_vport_stats(edev->cdev, &stats); spin_lock(&edev->stats_lock); p_common->no_buff_discards = stats.common.no_buff_discards; p_common->packet_too_big_discard = stats.common.packet_too_big_discard; p_common->ttl0_discard = stats.common.ttl0_discard; p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes; p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes; p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes; p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts; p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts; p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts; p_common->mftag_filter_discards = stats.common.mftag_filter_discards; p_common->mac_filter_discards = stats.common.mac_filter_discards; p_common->gft_filter_drop = stats.common.gft_filter_drop; p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes; p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes; p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes; p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts; p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts; p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts; p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts; p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts; p_common->coalesced_events = stats.common.tpa_coalesced_events; p_common->coalesced_aborts_num = stats.common.tpa_aborts_num; p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts; p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes; p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets; p_common->rx_65_to_127_byte_packets = stats.common.rx_65_to_127_byte_packets; p_common->rx_128_to_255_byte_packets = stats.common.rx_128_to_255_byte_packets; p_common->rx_256_to_511_byte_packets = stats.common.rx_256_to_511_byte_packets; p_common->rx_512_to_1023_byte_packets = stats.common.rx_512_to_1023_byte_packets; p_common->rx_1024_to_1518_byte_packets = stats.common.rx_1024_to_1518_byte_packets; p_common->rx_crc_errors = stats.common.rx_crc_errors; p_common->rx_mac_crtl_frames = 
stats.common.rx_mac_crtl_frames; p_common->rx_pause_frames = stats.common.rx_pause_frames; p_common->rx_pfc_frames = stats.common.rx_pfc_frames; p_common->rx_align_errors = stats.common.rx_align_errors; p_common->rx_carrier_errors = stats.common.rx_carrier_errors; p_common->rx_oversize_packets = stats.common.rx_oversize_packets; p_common->rx_jabbers = stats.common.rx_jabbers; p_common->rx_undersize_packets = stats.common.rx_undersize_packets; p_common->rx_fragments = stats.common.rx_fragments; p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets; p_common->tx_65_to_127_byte_packets = stats.common.tx_65_to_127_byte_packets; p_common->tx_128_to_255_byte_packets = stats.common.tx_128_to_255_byte_packets; p_common->tx_256_to_511_byte_packets = stats.common.tx_256_to_511_byte_packets; p_common->tx_512_to_1023_byte_packets = stats.common.tx_512_to_1023_byte_packets; p_common->tx_1024_to_1518_byte_packets = stats.common.tx_1024_to_1518_byte_packets; p_common->tx_pause_frames = stats.common.tx_pause_frames; p_common->tx_pfc_frames = stats.common.tx_pfc_frames; p_common->brb_truncates = stats.common.brb_truncates; p_common->brb_discards = stats.common.brb_discards; p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; p_common->link_change_count = stats.common.link_change_count; p_common->ptp_skip_txts = edev->ptp_skip_txts; if (QEDE_IS_BB(edev)) { struct qede_stats_bb *p_bb = &edev->stats.bb; p_bb->rx_1519_to_1522_byte_packets = stats.bb.rx_1519_to_1522_byte_packets; p_bb->rx_1519_to_2047_byte_packets = stats.bb.rx_1519_to_2047_byte_packets; p_bb->rx_2048_to_4095_byte_packets = stats.bb.rx_2048_to_4095_byte_packets; p_bb->rx_4096_to_9216_byte_packets = stats.bb.rx_4096_to_9216_byte_packets; p_bb->rx_9217_to_16383_byte_packets = stats.bb.rx_9217_to_16383_byte_packets; p_bb->tx_1519_to_2047_byte_packets = stats.bb.tx_1519_to_2047_byte_packets; p_bb->tx_2048_to_4095_byte_packets = stats.bb.tx_2048_to_4095_byte_packets; p_bb->tx_4096_to_9216_byte_packets = stats.bb.tx_4096_to_9216_byte_packets; p_bb->tx_9217_to_16383_byte_packets = stats.bb.tx_9217_to_16383_byte_packets; p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count; p_bb->tx_total_collisions = stats.bb.tx_total_collisions; } else { struct qede_stats_ah *p_ah = &edev->stats.ah; p_ah->rx_1519_to_max_byte_packets = stats.ah.rx_1519_to_max_byte_packets; p_ah->tx_1519_to_max_byte_packets = stats.ah.tx_1519_to_max_byte_packets; } spin_unlock(&edev->stats_lock); } static void qede_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct qede_dev *edev = netdev_priv(dev); struct qede_stats_common *p_common; p_common = &edev->stats.common; spin_lock(&edev->stats_lock); stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + p_common->rx_bcast_pkts; stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + p_common->tx_bcast_pkts; stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + p_common->rx_bcast_bytes; stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + p_common->tx_bcast_bytes; stats->tx_errors = p_common->tx_err_drop_pkts; stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts; stats->rx_fifo_errors = p_common->no_buff_discards; if (QEDE_IS_BB(edev)) stats->collisions = edev->stats.bb.tx_total_collisions; stats->rx_crc_errors = p_common->rx_crc_errors; stats->rx_frame_errors = p_common->rx_align_errors; spin_unlock(&edev->stats_lock); } #ifdef CONFIG_QED_SRIOV static int qede_get_vf_config(struct net_device *dev, int 
vfidx, struct ifla_vf_info *ivi) { struct qede_dev *edev = netdev_priv(dev); if (!edev->ops) return -EINVAL; return edev->ops->iov->get_config(edev->cdev, vfidx, ivi); } static int qede_set_vf_rate(struct net_device *dev, int vfidx, int min_tx_rate, int max_tx_rate) { struct qede_dev *edev = netdev_priv(dev); return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate, max_tx_rate); } static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val) { struct qede_dev *edev = netdev_priv(dev); if (!edev->ops) return -EINVAL; return edev->ops->iov->set_spoof(edev->cdev, vfidx, val); } static int qede_set_vf_link_state(struct net_device *dev, int vfidx, int link_state) { struct qede_dev *edev = netdev_priv(dev); if (!edev->ops) return -EINVAL; return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state); } static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting) { struct qede_dev *edev = netdev_priv(dev); if (!edev->ops) return -EINVAL; return edev->ops->iov->set_trust(edev->cdev, vfidx, setting); } #endif static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct qede_dev *edev = netdev_priv(dev); if (!netif_running(dev)) return -EAGAIN; switch (cmd) { case SIOCSHWTSTAMP: return qede_ptp_hw_ts(edev, ifr); default: DP_VERBOSE(edev, QED_MSG_DEBUG, "default IOCTL cmd 0x%x\n", cmd); return -EOPNOTSUPP; } return 0; } static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp) { char *p_sb = (char *)fp->sb_info->sb_virt; u32 sb_size, i; sb_size = sizeof(struct status_block); for (i = 0; i < sb_size; i += 8) DP_NOTICE(edev, "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n", p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3], p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]); } static void qede_txq_fp_log_metadata(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq) { struct qed_chain *p_chain = &txq->tx_pbl; /* Dump txq/fp/sb ids etc. 
other metadata */ DP_NOTICE(edev, "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n", fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos, p_chain, p_chain->capacity, p_chain->size, jiffies, HZ); /* Dump all the relevant prod/cons indexes */ DP_NOTICE(edev, "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n", le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons, qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain)); } static void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq) { struct qed_sb_info_dbg sb_dbg; int rc; /* sb info */ qede_fp_sb_dump(edev, fp); memset(&sb_dbg, 0, sizeof(sb_dbg)); rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg); DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n", sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]); /* report to mfw */ edev->ops->common->mfw_report(edev->cdev, "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n", txq->index, le16_to_cpu(*txq->hw_cons_ptr), qed_chain_get_cons_idx(&txq->tx_pbl), qed_chain_get_prod_idx(&txq->tx_pbl), jiffies); if (!rc) edev->ops->common->mfw_report(edev->cdev, "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n", txq->index, fp->sb_info->igu_sb_id, sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]); } static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct qede_dev *edev = netdev_priv(dev); int i; netif_carrier_off(dev); DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue); for_each_queue(i) { struct qede_tx_queue *txq; struct qede_fastpath *fp; int cos; fp = &edev->fp_array[i]; if (!(fp->type & QEDE_FASTPATH_TX)) continue; for_each_cos_in_txq(edev, cos) { txq = &fp->txq[cos]; /* Dump basic metadata for all queues */ qede_txq_fp_log_metadata(edev, fp, txq); if (qed_chain_get_cons_idx(&txq->tx_pbl) != qed_chain_get_prod_idx(&txq->tx_pbl)) qede_tx_log_print(edev, fp, txq); } } if (IS_VF(edev)) return; if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) || edev->state == QEDE_STATE_RECOVERY) { DP_INFO(edev, "Avoid handling a Tx timeout while another HW error is being handled\n"); return; } set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags); set_bit(QEDE_SP_HW_ERR, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); } static int qede_setup_tc(struct net_device *ndev, u8 num_tc) { struct qede_dev *edev = netdev_priv(ndev); int cos, count, offset; if (num_tc > edev->dev_info.num_tc) return -EINVAL; netdev_reset_tc(ndev); netdev_set_num_tc(ndev, num_tc); for_each_cos_in_txq(edev, cos) { count = QEDE_TSS_COUNT(edev); offset = cos * QEDE_TSS_COUNT(edev); netdev_set_tc_queue(ndev, cos, count, offset); } return 0; } static int qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f, __be16 proto) { switch (f->command) { case FLOW_CLS_REPLACE: return qede_add_tc_flower_fltr(edev, proto, f); case FLOW_CLS_DESTROY: return qede_delete_flow_filter(edev, f->cookie); default: return -EOPNOTSUPP; } } static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { struct flow_cls_offload *f; struct qede_dev *edev = cb_priv; if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data)) return -EOPNOTSUPP; switch (type) { case TC_SETUP_CLSFLOWER: f = type_data; return qede_set_flower(edev, f, f->common.protocol); default: return -EOPNOTSUPP; } } static LIST_HEAD(qede_block_cb_list); static int 
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type, void *type_data) { struct qede_dev *edev = netdev_priv(dev); struct tc_mqprio_qopt *mqprio; switch (type) { case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &qede_block_cb_list, qede_setup_tc_block_cb, edev, edev, true); case TC_SETUP_QDISC_MQPRIO: mqprio = type_data; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; return qede_setup_tc(dev, mqprio->num_tc); default: return -EOPNOTSUPP; } } static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, .ndo_eth_ioctl = qede_ioctl, .ndo_tx_timeout = qede_tx_timeout, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, .ndo_set_vf_vlan = qede_set_vf_vlan, .ndo_set_vf_trust = qede_set_vf_trust, #endif .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_fix_features = qede_fix_features, .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_link_state = qede_set_vf_link_state, .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, .ndo_get_vf_config = qede_get_vf_config, .ndo_set_vf_rate = qede_set_vf_rate, #endif .ndo_features_check = qede_features_check, .ndo_bpf = qede_xdp, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = qede_rx_flow_steer, #endif .ndo_xdp_xmit = qede_xdp_transmit, .ndo_setup_tc = qede_setup_tc_offload, }; static const struct net_device_ops qede_netdev_vf_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_fix_features = qede_fix_features, .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, .ndo_features_check = qede_features_check, }; static const struct net_device_ops qede_netdev_vf_xdp_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_fix_features = qede_fix_features, .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, .ndo_features_check = qede_features_check, .ndo_bpf = qede_xdp, .ndo_xdp_xmit = qede_xdp_transmit, }; /* ------------------------------------------------------------------------- * START OF PROBE / REMOVE * ------------------------------------------------------------------------- */ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, struct pci_dev *pdev, struct qed_dev_eth_info *info, u32 dp_module, u8 dp_level) { struct net_device *ndev; struct qede_dev *edev; ndev = alloc_etherdev_mqs(sizeof(*edev), info->num_queues * info->num_tc, info->num_queues); if (!ndev) { pr_err("etherdev allocation failed\n"); return NULL; } edev = netdev_priv(ndev); edev->ndev = ndev; edev->cdev = cdev; edev->pdev = pdev; edev->dp_module = dp_module; edev->dp_level = dp_level; edev->ops = 
qed_ops; if (is_kdump_kernel()) { edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN; edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN; } else { edev->q_num_rx_buffers = NUM_RX_BDS_DEF; edev->q_num_tx_buffers = NUM_TX_BDS_DEF; } DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n", info->num_queues, info->num_queues); SET_NETDEV_DEV(ndev, &pdev->dev); memset(&edev->stats, 0, sizeof(edev->stats)); memcpy(&edev->dev_info, info, sizeof(*info)); /* As ethtool doesn't have the ability to show WoL behavior as * 'default', if device supports it declare it's enabled. */ if (edev->dev_info.common.wol_support) edev->wol_enabled = true; INIT_LIST_HEAD(&edev->vlan_list); return edev; } static void qede_init_ndev(struct qede_dev *edev) { struct net_device *ndev = edev->ndev; struct pci_dev *pdev = edev->pdev; bool udp_tunnel_enable = false; netdev_features_t hw_features; pci_set_drvdata(pdev, ndev); ndev->mem_start = edev->dev_info.common.pci_mem_start; ndev->base_addr = ndev->mem_start; ndev->mem_end = edev->dev_info.common.pci_mem_end; ndev->irq = edev->dev_info.common.pci_irq; ndev->watchdog_timeo = TX_TIMEOUT; if (IS_VF(edev)) { if (edev->dev_info.xdp_supported) ndev->netdev_ops = &qede_netdev_vf_xdp_ops; else ndev->netdev_ops = &qede_netdev_vf_ops; } else { ndev->netdev_ops = &qede_netdev_ops; } qede_set_ethtool_ops(ndev); ndev->priv_flags |= IFF_UNICAST_FLT; /* user-changeble features */ hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC; if (edev->dev_info.common.b_arfs_capable) hw_features |= NETIF_F_NTUPLE; if (edev->dev_info.common.vxlan_enable || edev->dev_info.common.geneve_enable) udp_tunnel_enable = true; if (udp_tunnel_enable || edev->dev_info.common.gre_enable) { hw_features |= NETIF_F_TSO_ECN; ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM; } if (udp_tunnel_enable) { hw_features |= (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM); ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM); qede_set_udp_tunnels(edev); } if (edev->dev_info.common.gre_enable) { hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM); ndev->hw_enc_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM); } ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX; ndev->hw_features = hw_features; ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT; /* MTU range: 46 - 9600 */ ndev->min_mtu = ETH_ZLEN - ETH_HLEN; ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE; /* Set network device HW mac */ eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac); ndev->mtu = edev->dev_info.common.mtu; } /* This function converts from 32b param to two params of level and module * Input 32b decoding: * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the * 'happy' flow, e.g. memory allocation failed. * b30 - enable all INFO prints. INFO prints are for major steps in the flow * and provide important parameters. * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that * module. VERBOSE prints are for tracking the specific flow in low level. * * Notice that the level should be that of the lowest required logs. 
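 * For example, a 'debug' value with only b30 set selects the INFO level and
 * leaves the module bitmap at zero, while a value with any of b29-b0 set
 * selects the VERBOSE level and uses those low 30 bits as the per-module
 * bitmap (the most verbose request wins, as implemented below).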
*/ void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level) { *p_dp_level = QED_LEVEL_NOTICE; *p_dp_module = 0; if (debug & QED_LOG_VERBOSE_MASK) { *p_dp_level = QED_LEVEL_VERBOSE; *p_dp_module = (debug & 0x3FFFFFFF); } else if (debug & QED_LOG_INFO_MASK) { *p_dp_level = QED_LEVEL_INFO; } else if (debug & QED_LOG_NOTICE_MASK) { *p_dp_level = QED_LEVEL_NOTICE; } } static void qede_free_fp_array(struct qede_dev *edev) { if (edev->fp_array) { struct qede_fastpath *fp; int i; for_each_queue(i) { fp = &edev->fp_array[i]; kfree(fp->sb_info); /* Handle mem alloc failure case where qede_init_fp * didn't register xdp_rxq_info yet. * Implicit only (fp->type & QEDE_FASTPATH_RX) */ if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq)) xdp_rxq_info_unreg(&fp->rxq->xdp_rxq); kfree(fp->rxq); kfree(fp->xdp_tx); kfree(fp->txq); } kfree(edev->fp_array); } edev->num_queues = 0; edev->fp_num_tx = 0; edev->fp_num_rx = 0; } static int qede_alloc_fp_array(struct qede_dev *edev) { u8 fp_combined, fp_rx = edev->fp_num_rx; struct qede_fastpath *fp; int i; edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev), sizeof(*edev->fp_array), GFP_KERNEL); if (!edev->fp_array) { DP_NOTICE(edev, "fp array allocation failed\n"); goto err; } if (!edev->coal_entry) { edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev), sizeof(*edev->coal_entry), GFP_KERNEL); if (!edev->coal_entry) { DP_ERR(edev, "coalesce entry allocation failed\n"); goto err; } } fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx; /* Allocate the FP elements for Rx queues followed by combined and then * the Tx. This ordering should be maintained so that the respective * queues (Rx or Tx) will be together in the fastpath array and the * associated ids will be sequential. */ for_each_queue(i) { fp = &edev->fp_array[i]; fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL); if (!fp->sb_info) { DP_NOTICE(edev, "sb info struct allocation failed\n"); goto err; } if (fp_rx) { fp->type = QEDE_FASTPATH_RX; fp_rx--; } else if (fp_combined) { fp->type = QEDE_FASTPATH_COMBINED; fp_combined--; } else { fp->type = QEDE_FASTPATH_TX; } if (fp->type & QEDE_FASTPATH_TX) { fp->txq = kcalloc(edev->dev_info.num_tc, sizeof(*fp->txq), GFP_KERNEL); if (!fp->txq) goto err; } if (fp->type & QEDE_FASTPATH_RX) { fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL); if (!fp->rxq) goto err; if (edev->xdp_prog) { fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx), GFP_KERNEL); if (!fp->xdp_tx) goto err; fp->type |= QEDE_FASTPATH_XDP; } } } return 0; err: qede_free_fp_array(edev); return -ENOMEM; } /* The qede lock is used to protect driver state change and driver flows that * are not reentrant. */ void __qede_lock(struct qede_dev *edev) { mutex_lock(&edev->qede_lock); } void __qede_unlock(struct qede_dev *edev) { mutex_unlock(&edev->qede_lock); } /* This version of the lock should be used when acquiring the RTNL lock is also * needed in addition to the internal qede lock. 
*/ static void qede_lock(struct qede_dev *edev) { rtnl_lock(); __qede_lock(edev); } static void qede_unlock(struct qede_dev *edev) { __qede_unlock(edev); rtnl_unlock(); } static void qede_periodic_task(struct work_struct *work) { struct qede_dev *edev = container_of(work, struct qede_dev, periodic_task.work); qede_fill_by_demand_stats(edev); schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks); } static void qede_init_periodic_task(struct qede_dev *edev) { INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task); spin_lock_init(&edev->stats_lock); edev->stats_coal_usecs = USEC_PER_SEC; edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC); } static void qede_sp_task(struct work_struct *work) { struct qede_dev *edev = container_of(work, struct qede_dev, sp_task.work); /* Disable execution of this deferred work once * qede removal is in progress, this stop any future * scheduling of sp_task. */ if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags)) return; /* The locking scheme depends on the specific flag: * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to * ensure that ongoing flows are ended and new ones are not started. * In other cases - only the internal qede lock should be acquired. */ if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { cancel_delayed_work_sync(&edev->periodic_task); #ifdef CONFIG_QED_SRIOV /* SRIOV must be disabled outside the lock to avoid a deadlock. * The recovery of the active VFs is currently not supported. */ if (pci_num_vf(edev->pdev)) qede_sriov_configure(edev->pdev, 0); #endif qede_lock(edev); qede_recovery_handler(edev); qede_unlock(edev); } __qede_lock(edev); if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) if (edev->state == QEDE_STATE_OPEN) qede_config_rx_mode(edev->ndev); #ifdef CONFIG_RFS_ACCEL if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) { if (edev->state == QEDE_STATE_OPEN) qede_process_arfs_filters(edev, false); } #endif if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags)) qede_generic_hw_err_handler(edev); __qede_unlock(edev); if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) { #ifdef CONFIG_QED_SRIOV /* SRIOV must be disabled outside the lock to avoid a deadlock. * The recovery of the active VFs is currently not supported. */ if (pci_num_vf(edev->pdev)) qede_sriov_configure(edev->pdev, 0); #endif edev->ops->common->recovery_process(edev->cdev); } } static void qede_update_pf_params(struct qed_dev *cdev) { struct qed_pf_params pf_params; u16 num_cons; /* 64 rx + 64 tx + 64 XDP */ memset(&pf_params, 0, sizeof(struct qed_pf_params)); /* 1 rx + 1 xdp + max tx cos */ num_cons = QED_MIN_L2_CONS; pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons; /* Same for VFs - make sure they'll have sufficient connections * to support XDP Tx queues. 
*/ pf_params.eth_pf_params.num_vf_cons = 48; pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; qed_ops->common->update_pf_params(cdev, &pf_params); } #define QEDE_FW_VER_STR_SIZE 80 static void qede_log_probe(struct qede_dev *edev) { struct qed_dev_info *p_dev_info = &edev->dev_info.common; u8 buf[QEDE_FW_VER_STR_SIZE]; size_t left_size; snprintf(buf, QEDE_FW_VER_STR_SIZE, "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d", p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev, p_dev_info->fw_eng, (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >> QED_MFW_VERSION_3_OFFSET, (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >> QED_MFW_VERSION_2_OFFSET, (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >> QED_MFW_VERSION_1_OFFSET, (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >> QED_MFW_VERSION_0_OFFSET); left_size = QEDE_FW_VER_STR_SIZE - strlen(buf); if (p_dev_info->mbi_version && left_size) snprintf(buf + strlen(buf), left_size, " [MBI %d.%d.%d]", (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET, (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET, (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET); pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number, PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn), buf, edev->ndev->name); } enum qede_probe_mode { QEDE_PROBE_NORMAL, QEDE_PROBE_RECOVERY, }; static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, bool is_vf, enum qede_probe_mode mode) { struct qed_probe_params probe_params; struct qed_slowpath_params sp_params; struct qed_dev_eth_info dev_info; struct qede_dev *edev; struct qed_dev *cdev; int rc; if (unlikely(dp_level & QED_LEVEL_INFO)) pr_notice("Starting qede probe\n"); memset(&probe_params, 0, sizeof(probe_params)); probe_params.protocol = QED_PROTOCOL_ETH; probe_params.dp_module = dp_module; probe_params.dp_level = dp_level; probe_params.is_vf = is_vf; probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY); cdev = qed_ops->common->probe(pdev, &probe_params); if (!cdev) { rc = -ENODEV; goto err0; } qede_update_pf_params(cdev); /* Start the Slowpath-process */ memset(&sp_params, 0, sizeof(sp_params)); sp_params.int_mode = QED_INT_MODE_MSIX; strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); rc = qed_ops->common->slowpath_start(cdev, &sp_params); if (rc) { pr_notice("Cannot start slowpath\n"); goto err1; } /* Learn information crucial for qede to progress */ rc = qed_ops->fill_dev_info(cdev, &dev_info); if (rc) goto err2; if (mode != QEDE_PROBE_RECOVERY) { edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, dp_level); if (!edev) { rc = -ENOMEM; goto err2; } edev->devlink = qed_ops->common->devlink_register(cdev); if (IS_ERR(edev->devlink)) { DP_NOTICE(edev, "Cannot register devlink\n"); rc = PTR_ERR(edev->devlink); edev->devlink = NULL; goto err3; } } else { struct net_device *ndev = pci_get_drvdata(pdev); struct qed_devlink *qdl; edev = netdev_priv(ndev); qdl = devlink_priv(edev->devlink); qdl->cdev = cdev; edev->cdev = cdev; memset(&edev->stats, 0, sizeof(edev->stats)); memcpy(&edev->dev_info, &dev_info, sizeof(dev_info)); } if (is_vf) set_bit(QEDE_FLAGS_IS_VF, &edev->flags); qede_init_ndev(edev); rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY)); if (rc) goto err3; if (mode != QEDE_PROBE_RECOVERY) { /* Prepare the lock prior to the registration of the netdev, * as once it's registered we might reach flows requiring it * [it's even possible to reach a flow needing it 
directly * from there, although it's unlikely]. */ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); mutex_init(&edev->qede_lock); qede_init_periodic_task(edev); rc = register_netdev(edev->ndev); if (rc) { DP_NOTICE(edev, "Cannot register net-device\n"); goto err4; } } edev->ops->common->set_name(cdev, edev->ndev->name); /* PTP not supported on VFs */ if (!is_vf) qede_ptp_enable(edev); edev->ops->register_ops(cdev, &qede_ll_ops, edev); #ifdef CONFIG_DCB if (!IS_VF(edev)) qede_set_dcbnl_ops(edev->ndev); #endif edev->rx_copybreak = QEDE_RX_HDR_SIZE; qede_log_probe(edev); /* retain user config (for example - after recovery) */ if (edev->stats_coal_usecs) schedule_delayed_work(&edev->periodic_task, 0); return 0; err4: qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY)); err3: if (mode != QEDE_PROBE_RECOVERY) free_netdev(edev->ndev); else edev->cdev = NULL; err2: qed_ops->common->slowpath_stop(cdev); err1: qed_ops->common->remove(cdev); err0: return rc; } static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) { bool is_vf = false; u32 dp_module = 0; u8 dp_level = 0; switch ((enum qede_pci_private)id->driver_data) { case QEDE_PRIVATE_VF: if (debug & QED_LOG_VERBOSE_MASK) dev_err(&pdev->dev, "Probing a VF\n"); is_vf = true; break; default: if (debug & QED_LOG_VERBOSE_MASK) dev_err(&pdev->dev, "Probing a PF\n"); } qede_config_debug(debug, &dp_module, &dp_level); return __qede_probe(pdev, dp_module, dp_level, is_vf, QEDE_PROBE_NORMAL); } enum qede_remove_mode { QEDE_REMOVE_NORMAL, QEDE_REMOVE_RECOVERY, }; static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) { struct net_device *ndev = pci_get_drvdata(pdev); struct qede_dev *edev; struct qed_dev *cdev; if (!ndev) { dev_info(&pdev->dev, "Device has already been removed\n"); return; } edev = netdev_priv(ndev); cdev = edev->cdev; DP_INFO(edev, "Starting qede_remove\n"); qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY)); if (mode != QEDE_REMOVE_RECOVERY) { set_bit(QEDE_SP_DISABLE, &edev->sp_flags); unregister_netdev(ndev); cancel_delayed_work_sync(&edev->sp_task); cancel_delayed_work_sync(&edev->periodic_task); edev->ops->common->set_power_state(cdev, PCI_D0); pci_set_drvdata(pdev, NULL); } qede_ptp_disable(edev); /* Use global ops since we've freed edev */ qed_ops->common->slowpath_stop(cdev); if (system_state == SYSTEM_POWER_OFF) return; if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) { qed_ops->common->devlink_unregister(edev->devlink); edev->devlink = NULL; } qed_ops->common->remove(cdev); edev->cdev = NULL; /* Since this can happen out-of-sync with other flows, * don't release the netdevice until after slowpath stop * has been called to guarantee various other contexts * [e.g., QED register callbacks] won't break anything when * accessing the netdevice. 
*/ if (mode != QEDE_REMOVE_RECOVERY) { kfree(edev->coal_entry); free_netdev(ndev); } dev_info(&pdev->dev, "Ending qede_remove successfully\n"); } static void qede_remove(struct pci_dev *pdev) { __qede_remove(pdev, QEDE_REMOVE_NORMAL); } static void qede_shutdown(struct pci_dev *pdev) { __qede_remove(pdev, QEDE_REMOVE_NORMAL); } /* ------------------------------------------------------------------------- * START OF LOAD / UNLOAD * ------------------------------------------------------------------------- */ static int qede_set_num_queues(struct qede_dev *edev) { int rc; u16 rss_num; /* Setup queues according to possible resources*/ if (edev->req_queues) rss_num = edev->req_queues; else rss_num = netif_get_num_default_rss_queues() * edev->dev_info.common.num_hwfns; rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num); rc = edev->ops->common->set_fp_int(edev->cdev, rss_num); if (rc > 0) { /* Managed to request interrupts for our queues */ edev->num_queues = rc; DP_INFO(edev, "Managed %d [of %d] RSS queues\n", QEDE_QUEUE_CNT(edev), rss_num); rc = 0; } edev->fp_num_tx = edev->req_num_tx; edev->fp_num_rx = edev->req_num_rx; return rc; } static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, u16 sb_id) { if (sb_info->sb_virt) { edev->ops->common->sb_release(edev->cdev, sb_info, sb_id, QED_SB_TYPE_L2_QUEUE); dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt), (void *)sb_info->sb_virt, sb_info->sb_phys); memset(sb_info, 0, sizeof(*sb_info)); } } /* This function allocates fast-path status block memory */ static int qede_alloc_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, u16 sb_id) { struct status_block *sb_virt; dma_addr_t sb_phys; int rc; sb_virt = dma_alloc_coherent(&edev->pdev->dev, sizeof(*sb_virt), &sb_phys, GFP_KERNEL); if (!sb_virt) { DP_ERR(edev, "Status block allocation failed\n"); return -ENOMEM; } rc = edev->ops->common->sb_init(edev->cdev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_L2_QUEUE); if (rc) { DP_ERR(edev, "Status block initialization failed\n"); dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); return rc; } return 0; } static void qede_free_rx_buffers(struct qede_dev *edev, struct qede_rx_queue *rxq) { u16 i; for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) { struct sw_rx_data *rx_buf; struct page *data; rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX]; data = rx_buf->data; dma_unmap_page(&edev->pdev->dev, rx_buf->mapping, PAGE_SIZE, rxq->data_direction); rx_buf->data = NULL; __free_page(data); } } static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { /* Free rx buffers */ qede_free_rx_buffers(edev, rxq); /* Free the parallel SW ring */ kfree(rxq->sw_rx_ring); /* Free the real RQ ring used by FW */ edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring); edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring); } static void qede_set_tpa_param(struct qede_rx_queue *rxq) { int i; for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; tpa_info->state = QEDE_AGG_STATE_NONE; } } /* This function allocates all memory needed per Rx queue */ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { struct qed_chain_init_params params = { .cnt_type = QED_CHAIN_CNT_TYPE_U16, .num_elems = RX_RING_SIZE, }; struct qed_dev *cdev = edev->cdev; int i, rc, size; rxq->num_rx_buffers = edev->q_num_rx_buffers; rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu; rxq->rx_headroom = edev->xdp_prog ? 
XDP_PACKET_HEADROOM : NET_SKB_PAD; size = rxq->rx_headroom + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); /* Make sure that the headroom and payload fit in a single page */ if (rxq->rx_buf_size + size > PAGE_SIZE) rxq->rx_buf_size = PAGE_SIZE - size; /* Segment size to split a page in multiple equal parts, * unless XDP is used in which case we'd use the entire page. */ if (!edev->xdp_prog) { size = size + rxq->rx_buf_size; rxq->rx_buf_seg_size = roundup_pow_of_two(size); } else { rxq->rx_buf_seg_size = PAGE_SIZE; edev->ndev->features &= ~NETIF_F_GRO_HW; } /* Allocate the parallel driver ring for Rx buffers */ size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE; rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); if (!rxq->sw_rx_ring) { DP_ERR(edev, "Rx buffers ring allocation failed\n"); rc = -ENOMEM; goto err; } /* Allocate FW Rx ring */ params.mode = QED_CHAIN_MODE_NEXT_PTR; params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; params.elem_size = sizeof(struct eth_rx_bd); rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params); if (rc) goto err; /* Allocate FW completion ring */ params.mode = QED_CHAIN_MODE_PBL; params.intended_use = QED_CHAIN_USE_TO_CONSUME; params.elem_size = sizeof(union eth_rx_cqe); rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params); if (rc) goto err; /* Allocate buffers for the Rx ring */ rxq->filled_buffers = 0; for (i = 0; i < rxq->num_rx_buffers; i++) { rc = qede_alloc_rx_buffer(rxq, false); if (rc) { DP_ERR(edev, "Rx buffers allocation failed at index %d\n", i); goto err; } } edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW); if (!edev->gro_disable) qede_set_tpa_param(rxq); err: return rc; } static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { /* Free the parallel SW ring */ if (txq->is_xdp) kfree(txq->sw_tx_ring.xdp); else kfree(txq->sw_tx_ring.skbs); /* Free the real RQ ring used by FW */ edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl); } /* This function allocates all memory needed per Tx queue */ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, .cnt_type = QED_CHAIN_CNT_TYPE_U16, .num_elems = edev->q_num_tx_buffers, .elem_size = sizeof(union eth_tx_bd_types), }; int size, rc; txq->num_tx_buffers = edev->q_num_tx_buffers; /* Allocate the parallel driver ring for Tx buffers */ if (txq->is_xdp) { size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers; txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL); if (!txq->sw_tx_ring.xdp) goto err; } else { size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers; txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL); if (!txq->sw_tx_ring.skbs) goto err; } rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params); if (rc) goto err; return 0; err: qede_free_mem_txq(edev, txq); return -ENOMEM; } /* This function frees all memory of a single fp */ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { qede_free_mem_sb(edev, fp->sb_info, fp->id); if (fp->type & QEDE_FASTPATH_RX) qede_free_mem_rxq(edev, fp->rxq); if (fp->type & QEDE_FASTPATH_XDP) qede_free_mem_txq(edev, fp->xdp_tx); if (fp->type & QEDE_FASTPATH_TX) { int cos; for_each_cos_in_txq(edev, cos) qede_free_mem_txq(edev, &fp->txq[cos]); } } /* This function allocates all memory needed for a single fp (i.e. an entity * which contains status block, one rx queue and/or multiple per-TC tx queues. 
*/ static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp) { int rc = 0; rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id); if (rc) goto out; if (fp->type & QEDE_FASTPATH_RX) { rc = qede_alloc_mem_rxq(edev, fp->rxq); if (rc) goto out; } if (fp->type & QEDE_FASTPATH_XDP) { rc = qede_alloc_mem_txq(edev, fp->xdp_tx); if (rc) goto out; } if (fp->type & QEDE_FASTPATH_TX) { int cos; for_each_cos_in_txq(edev, cos) { rc = qede_alloc_mem_txq(edev, &fp->txq[cos]); if (rc) goto out; } } out: return rc; } static void qede_free_mem_load(struct qede_dev *edev) { int i; for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; qede_free_mem_fp(edev, fp); } } /* This function allocates all qede memory at NIC load. */ static int qede_alloc_mem_load(struct qede_dev *edev) { int rc = 0, queue_id; for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) { struct qede_fastpath *fp = &edev->fp_array[queue_id]; rc = qede_alloc_mem_fp(edev, fp); if (rc) { DP_ERR(edev, "Failed to allocate memory for fastpath - rss id = %d\n", queue_id); qede_free_mem_load(edev); return rc; } } return 0; } static void qede_empty_tx_queue(struct qede_dev *edev, struct qede_tx_queue *txq) { unsigned int pkts_compl = 0, bytes_compl = 0; struct netdev_queue *netdev_txq; int rc, len = 0; netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); while (qed_chain_get_cons_idx(&txq->tx_pbl) != qed_chain_get_prod_idx(&txq->tx_pbl)) { DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", txq->index, qed_chain_get_cons_idx(&txq->tx_pbl), qed_chain_get_prod_idx(&txq->tx_pbl)); rc = qede_free_tx_pkt(edev, txq, &len); if (rc) { DP_NOTICE(edev, "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", txq->index, qed_chain_get_cons_idx(&txq->tx_pbl), qed_chain_get_prod_idx(&txq->tx_pbl)); break; } bytes_compl += len; pkts_compl++; txq->sw_tx_cons++; } netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); } static void qede_empty_tx_queues(struct qede_dev *edev) { int i; for_each_queue(i) if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { int cos; for_each_cos_in_txq(edev, cos) { struct qede_fastpath *fp; fp = &edev->fp_array[i]; qede_empty_tx_queue(edev, &fp->txq[cos]); } } } /* This function inits fp content and resets the SB, RXQ and TXQ structures */ static void qede_init_fp(struct qede_dev *edev) { int queue_id, rxq_index = 0, txq_index = 0; struct qede_fastpath *fp; bool init_xdp = false; for_each_queue(queue_id) { fp = &edev->fp_array[queue_id]; fp->edev = edev; fp->id = queue_id; if (fp->type & QEDE_FASTPATH_XDP) { fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev, rxq_index); fp->xdp_tx->is_xdp = 1; spin_lock_init(&fp->xdp_tx->xdp_tx_lock); init_xdp = true; } if (fp->type & QEDE_FASTPATH_RX) { fp->rxq->rxq_id = rxq_index++; /* Determine how to map buffers for this queue */ if (fp->type & QEDE_FASTPATH_XDP) fp->rxq->data_direction = DMA_BIDIRECTIONAL; else fp->rxq->data_direction = DMA_FROM_DEVICE; fp->rxq->dev = &edev->pdev->dev; /* Driver have no error path from here */ WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev, fp->rxq->rxq_id, 0) < 0); if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq, MEM_TYPE_PAGE_ORDER0, NULL)) { DP_NOTICE(edev, "Failed to register XDP memory model\n"); } } if (fp->type & QEDE_FASTPATH_TX) { int cos; for_each_cos_in_txq(edev, cos) { struct qede_tx_queue *txq = &fp->txq[cos]; u16 ndev_tx_id; txq->cos = cos; txq->index = txq_index; ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq); 
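/* Editor's note: QEDE_TXQ_TO_NDEV_TXQ_ID() flattens the (traffic class,
 * queue index) pair into the netdev TX queue number that is later fed to
 * netdev_get_tx_queue(); this mirrors the tc-to-queue mapping programmed in
 * qede_setup_tc() above, where each class owns a contiguous block of
 * QEDE_TSS_COUNT(edev) queues starting at cos * QEDE_TSS_COUNT(edev).
 */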
txq->ndev_txq_id = ndev_tx_id; if (edev->dev_info.is_legacy) txq->is_legacy = true; txq->dev = &edev->pdev->dev; } txq_index++; } snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, queue_id); } if (init_xdp) { edev->total_xdp_queues = QEDE_RSS_COUNT(edev); DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues); } } static int qede_set_real_num_queues(struct qede_dev *edev) { int rc = 0; rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc); if (rc) { DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); return rc; } rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev)); if (rc) { DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); return rc; } return 0; } static void qede_napi_disable_remove(struct qede_dev *edev) { int i; for_each_queue(i) { napi_disable(&edev->fp_array[i].napi); netif_napi_del(&edev->fp_array[i].napi); } } static void qede_napi_add_enable(struct qede_dev *edev) { int i; /* Add NAPI objects */ for_each_queue(i) { netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll); napi_enable(&edev->fp_array[i].napi); } } static void qede_sync_free_irqs(struct qede_dev *edev) { int i; for (i = 0; i < edev->int_info.used_cnt; i++) { if (edev->int_info.msix_cnt) { free_irq(edev->int_info.msix[i].vector, &edev->fp_array[i]); } else { edev->ops->common->simd_handler_clean(edev->cdev, i); } } edev->int_info.used_cnt = 0; edev->int_info.msix_cnt = 0; } static int qede_req_msix_irqs(struct qede_dev *edev) { int i, rc; /* Sanitize number of interrupts == number of prepared RSS queues */ if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) { DP_ERR(edev, "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt); return -EINVAL; } for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) { #ifdef CONFIG_RFS_ACCEL struct qede_fastpath *fp = &edev->fp_array[i]; if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) { rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap, edev->int_info.msix[i].vector); if (rc) { DP_ERR(edev, "Failed to add CPU rmap\n"); qede_free_arfs(edev); } } #endif rc = request_irq(edev->int_info.msix[i].vector, qede_msix_fp_int, 0, edev->fp_array[i].name, &edev->fp_array[i]); if (rc) { DP_ERR(edev, "Request fp %d irq failed\n", i); #ifdef CONFIG_RFS_ACCEL if (edev->ndev->rx_cpu_rmap) free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); edev->ndev->rx_cpu_rmap = NULL; #endif qede_sync_free_irqs(edev); return rc; } DP_VERBOSE(edev, NETIF_MSG_INTR, "Requested fp irq for %s [entry %d]. 
Cookie is at %p\n", edev->fp_array[i].name, i, &edev->fp_array[i]); edev->int_info.used_cnt++; } return 0; } static void qede_simd_fp_handler(void *cookie) { struct qede_fastpath *fp = (struct qede_fastpath *)cookie; napi_schedule_irqoff(&fp->napi); } static int qede_setup_irqs(struct qede_dev *edev) { int i, rc = 0; /* Learn Interrupt configuration */ rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info); if (rc) return rc; if (edev->int_info.msix_cnt) { rc = qede_req_msix_irqs(edev); if (rc) return rc; edev->ndev->irq = edev->int_info.msix[0].vector; } else { const struct qed_common_ops *ops; /* qed should learn receive the RSS ids and callbacks */ ops = edev->ops->common; for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) ops->simd_handler_config(edev->cdev, &edev->fp_array[i], i, qede_simd_fp_handler); edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev); } return 0; } static int qede_drain_txq(struct qede_dev *edev, struct qede_tx_queue *txq, bool allow_drain) { int rc, cnt = 1000; while (txq->sw_tx_cons != txq->sw_tx_prod) { if (!cnt) { if (allow_drain) { DP_NOTICE(edev, "Tx queue[%d] is stuck, requesting MCP to drain\n", txq->index); rc = edev->ops->common->drain(edev->cdev); if (rc) return rc; return qede_drain_txq(edev, txq, false); } DP_NOTICE(edev, "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n", txq->index, txq->sw_tx_prod, txq->sw_tx_cons); return -ENODEV; } cnt--; usleep_range(1000, 2000); barrier(); } /* FW finished processing, wait for HW to transmit all tx packets */ usleep_range(1000, 2000); return 0; } static int qede_stop_txq(struct qede_dev *edev, struct qede_tx_queue *txq, int rss_id) { /* delete doorbell from doorbell recovery mechanism */ edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr, &txq->tx_db); return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle); } static int qede_stop_queues(struct qede_dev *edev) { struct qed_update_vport_params *vport_update_params; struct qed_dev *cdev = edev->cdev; struct qede_fastpath *fp; int rc, i; /* Disable the vport */ vport_update_params = vzalloc(sizeof(*vport_update_params)); if (!vport_update_params) return -ENOMEM; vport_update_params->vport_id = 0; vport_update_params->update_vport_active_flg = 1; vport_update_params->vport_active_flg = 0; vport_update_params->update_rss_flg = 0; rc = edev->ops->vport_update(cdev, vport_update_params); vfree(vport_update_params); if (rc) { DP_ERR(edev, "Failed to update vport\n"); return rc; } /* Flush Tx queues. 
If needed, request drain from MCP */ for_each_queue(i) { fp = &edev->fp_array[i]; if (fp->type & QEDE_FASTPATH_TX) { int cos; for_each_cos_in_txq(edev, cos) { rc = qede_drain_txq(edev, &fp->txq[cos], true); if (rc) return rc; } } if (fp->type & QEDE_FASTPATH_XDP) { rc = qede_drain_txq(edev, fp->xdp_tx, true); if (rc) return rc; } } /* Stop all Queues in reverse order */ for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) { fp = &edev->fp_array[i]; /* Stop the Tx Queue(s) */ if (fp->type & QEDE_FASTPATH_TX) { int cos; for_each_cos_in_txq(edev, cos) { rc = qede_stop_txq(edev, &fp->txq[cos], i); if (rc) return rc; } } /* Stop the Rx Queue */ if (fp->type & QEDE_FASTPATH_RX) { rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle); if (rc) { DP_ERR(edev, "Failed to stop RXQ #%d\n", i); return rc; } } /* Stop the XDP forwarding queue */ if (fp->type & QEDE_FASTPATH_XDP) { rc = qede_stop_txq(edev, fp->xdp_tx, i); if (rc) return rc; bpf_prog_put(fp->rxq->xdp_prog); } } /* Stop the vport */ rc = edev->ops->vport_stop(cdev, 0); if (rc) DP_ERR(edev, "Failed to stop VPORT\n"); return rc; } static int qede_start_txq(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx) { dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl); u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl); struct qed_queue_start_common_params params; struct qed_txq_start_ret_params ret_params; int rc; memset(&params, 0, sizeof(params)); memset(&ret_params, 0, sizeof(ret_params)); /* Let the XDP queue share the queue-zone with one of the regular txq. * We don't really care about its coalescing. */ if (txq->is_xdp) params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq); else params.queue_id = txq->index; params.p_sb = fp->sb_info; params.sb_idx = sb_idx; params.tc = txq->cos; rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table, page_cnt, &ret_params); if (rc) { DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc); return rc; } txq->doorbell_addr = ret_params.p_doorbell; txq->handle = ret_params.p_handle; /* Determine the FW consumer address associated */ txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx]; /* Prepare the doorbell parameters */ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM); SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL, DQ_XCM_ETH_TX_BD_PROD_CMD); txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; /* register doorbell with doorbell recovery mechanism */ rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr, &txq->tx_db, DB_REC_WIDTH_32B, DB_REC_KERNEL); return rc; } static int qede_start_queues(struct qede_dev *edev, bool clear_stats) { int vlan_removal_en = 1; struct qed_dev *cdev = edev->cdev; struct qed_dev_info *qed_info = &edev->dev_info.common; struct qed_update_vport_params *vport_update_params; struct qed_queue_start_common_params q_params; struct qed_start_vport_params start = {0}; int rc, i; if (!edev->num_queues) { DP_ERR(edev, "Cannot update V-VPORT as active as there are no Rx queues\n"); return -EINVAL; } vport_update_params = vzalloc(sizeof(*vport_update_params)); if (!vport_update_params) return -ENOMEM; start.handle_ptp_pkts = !!(edev->ptp); start.gro_enable = !edev->gro_disable; start.mtu = edev->ndev->mtu; start.vport_id = 0; start.drop_ttl0 = true; start.remove_inner_vlan = vlan_removal_en; start.clear_stats = clear_stats; rc = edev->ops->vport_start(cdev, &start); if (rc) { DP_ERR(edev, "Start V-PORT failed %d\n", 
rc); goto out; } DP_VERBOSE(edev, NETIF_MSG_IFUP, "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); for_each_queue(i) { struct qede_fastpath *fp = &edev->fp_array[i]; dma_addr_t p_phys_table; u32 page_cnt; if (fp->type & QEDE_FASTPATH_RX) { struct qed_rxq_start_ret_params ret_params; struct qede_rx_queue *rxq = fp->rxq; __le16 *val; memset(&ret_params, 0, sizeof(ret_params)); memset(&q_params, 0, sizeof(q_params)); q_params.queue_id = rxq->rxq_id; q_params.vport_id = 0; q_params.p_sb = fp->sb_info; q_params.sb_idx = RX_PI; p_phys_table = qed_chain_get_pbl_phys(&rxq->rx_comp_ring); page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring); rc = edev->ops->q_rx_start(cdev, i, &q_params, rxq->rx_buf_size, rxq->rx_bd_ring.p_phys_addr, p_phys_table, page_cnt, &ret_params); if (rc) { DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); goto out; } /* Use the return parameters */ rxq->hw_rxq_prod_addr = ret_params.p_prod; rxq->handle = ret_params.p_handle; val = &fp->sb_info->sb_virt->pi_array[RX_PI]; rxq->hw_cons_ptr = val; qede_update_rx_prod(edev, rxq); } if (fp->type & QEDE_FASTPATH_XDP) { rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI); if (rc) goto out; bpf_prog_add(edev->xdp_prog, 1); fp->rxq->xdp_prog = edev->xdp_prog; } if (fp->type & QEDE_FASTPATH_TX) { int cos; for_each_cos_in_txq(edev, cos) { rc = qede_start_txq(edev, fp, &fp->txq[cos], i, TX_PI(cos)); if (rc) goto out; } } } /* Prepare and send the vport enable */ vport_update_params->vport_id = start.vport_id; vport_update_params->update_vport_active_flg = 1; vport_update_params->vport_active_flg = 1; if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) && qed_info->tx_switching) { vport_update_params->update_tx_switching_flg = 1; vport_update_params->tx_switching_flg = 1; } qede_fill_rss_params(edev, &vport_update_params->rss_params, &vport_update_params->update_rss_flg); rc = edev->ops->vport_update(cdev, vport_update_params); if (rc) DP_ERR(edev, "Update V-PORT failed %d\n", rc); out: vfree(vport_update_params); return rc; } enum qede_unload_mode { QEDE_UNLOAD_NORMAL, QEDE_UNLOAD_RECOVERY, }; static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, bool is_locked) { struct qed_link_params link_params; int rc; DP_INFO(edev, "Starting qede unload\n"); if (!is_locked) __qede_lock(edev); clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); if (mode != QEDE_UNLOAD_RECOVERY) edev->state = QEDE_STATE_CLOSED; qede_rdma_dev_event_close(edev); /* Close OS Tx */ netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); if (mode != QEDE_UNLOAD_RECOVERY) { /* Reset the link */ memset(&link_params, 0, sizeof(link_params)); link_params.link_up = false; edev->ops->common->set_link(edev->cdev, &link_params); rc = qede_stop_queues(edev); if (rc) { #ifdef CONFIG_RFS_ACCEL if (edev->dev_info.common.b_arfs_capable) { qede_poll_for_freeing_arfs_filters(edev); if (edev->ndev->rx_cpu_rmap) free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); edev->ndev->rx_cpu_rmap = NULL; } #endif qede_sync_free_irqs(edev); goto out; } DP_INFO(edev, "Stopped Queues\n"); } qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); if (edev->dev_info.common.b_arfs_capable) { qede_poll_for_freeing_arfs_filters(edev); qede_free_arfs(edev); } /* Release the interrupts */ qede_sync_free_irqs(edev); edev->ops->common->set_fp_int(edev->cdev, 0); qede_napi_disable_remove(edev); if (mode == QEDE_UNLOAD_RECOVERY) qede_empty_tx_queues(edev); qede_free_mem_load(edev); 
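/* Per-queue rings, buffers and status block memory were released just above;
 * qede_free_fp_array() below then frees the rxq/txq/sb_info containers and
 * the fastpath array itself, and resets the queue counters to zero.
 */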
qede_free_fp_array(edev); out: if (!is_locked) __qede_unlock(edev); if (mode != QEDE_UNLOAD_RECOVERY) DP_NOTICE(edev, "Link is down\n"); edev->ptp_skip_txts = 0; DP_INFO(edev, "Ending qede unload\n"); } enum qede_load_mode { QEDE_LOAD_NORMAL, QEDE_LOAD_RELOAD, QEDE_LOAD_RECOVERY, }; static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, bool is_locked) { struct qed_link_params link_params; struct ethtool_coalesce coal = {}; u8 num_tc; int rc, i; DP_INFO(edev, "Starting qede load\n"); if (!is_locked) __qede_lock(edev); rc = qede_set_num_queues(edev); if (rc) goto out; rc = qede_alloc_fp_array(edev); if (rc) goto out; qede_init_fp(edev); rc = qede_alloc_mem_load(edev); if (rc) goto err1; DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n", QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev)); rc = qede_set_real_num_queues(edev); if (rc) goto err2; if (qede_alloc_arfs(edev)) { edev->ndev->features &= ~NETIF_F_NTUPLE; edev->dev_info.common.b_arfs_capable = false; } qede_napi_add_enable(edev); DP_INFO(edev, "Napi added and enabled\n"); rc = qede_setup_irqs(edev); if (rc) goto err3; DP_INFO(edev, "Setup IRQs succeeded\n"); rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD); if (rc) goto err4; DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); num_tc = netdev_get_num_tc(edev->ndev); num_tc = num_tc ? num_tc : edev->dev_info.num_tc; qede_setup_tc(edev->ndev, num_tc); /* Program un-configured VLANs */ qede_configure_vlan_filters(edev); set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); /* Ask for link-up using current configuration */ memset(&link_params, 0, sizeof(link_params)); link_params.link_up = true; edev->ops->common->set_link(edev->cdev, &link_params); edev->state = QEDE_STATE_OPEN; coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS; coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS; for_each_queue(i) { if (edev->coal_entry[i].isvalid) { coal.rx_coalesce_usecs = edev->coal_entry[i].rxc; coal.tx_coalesce_usecs = edev->coal_entry[i].txc; } __qede_unlock(edev); qede_set_per_coalesce(edev->ndev, i, &coal); __qede_lock(edev); } DP_INFO(edev, "Ending successfully qede load\n"); goto out; err4: qede_sync_free_irqs(edev); err3: qede_napi_disable_remove(edev); err2: qede_free_mem_load(edev); err1: edev->ops->common->set_fp_int(edev->cdev, 0); qede_free_fp_array(edev); edev->num_queues = 0; edev->fp_num_tx = 0; edev->fp_num_rx = 0; out: if (!is_locked) __qede_unlock(edev); return rc; } /* 'func' should be able to run between unload and reload assuming interface * is actually running, or afterwards in case it's currently DOWN. */ void qede_reload(struct qede_dev *edev, struct qede_reload_args *args, bool is_locked) { if (!is_locked) __qede_lock(edev); /* Since qede_lock is held, internal state wouldn't change even * if netdev state would start transitioning. Check whether current * internal configuration indicates device is up, then reload. 
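 * (An illustrative sketch of this unload -> func() -> reload pattern is
 * appended after qede_io_error_detected() at the end of this file.)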
*/ if (edev->state == QEDE_STATE_OPEN) { qede_unload(edev, QEDE_UNLOAD_NORMAL, true); if (args) args->func(edev, args); qede_load(edev, QEDE_LOAD_RELOAD, true); /* Since no one is going to do it for us, re-configure */ qede_config_rx_mode(edev->ndev); } else if (args) { args->func(edev, args); } if (!is_locked) __qede_unlock(edev); } /* called with rtnl_lock */ static int qede_open(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); int rc; netif_carrier_off(ndev); edev->ops->common->set_power_state(edev->cdev, PCI_D0); rc = qede_load(edev, QEDE_LOAD_NORMAL, false); if (rc) return rc; udp_tunnel_nic_reset_ntf(ndev); edev->ops->common->update_drv_state(edev->cdev, true); return 0; } static int qede_close(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); qede_unload(edev, QEDE_UNLOAD_NORMAL, false); if (edev->cdev) edev->ops->common->update_drv_state(edev->cdev, false); return 0; } static void qede_link_update(void *dev, struct qed_link_output *link) { struct qede_dev *edev = dev; if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) { DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n"); return; } if (link->link_up) { if (!netif_carrier_ok(edev->ndev)) { DP_NOTICE(edev, "Link is up\n"); netif_tx_start_all_queues(edev->ndev); netif_carrier_on(edev->ndev); qede_rdma_dev_event_open(edev); } } else { if (netif_carrier_ok(edev->ndev)) { DP_NOTICE(edev, "Link is down\n"); netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); qede_rdma_dev_event_close(edev); } } } static void qede_schedule_recovery_handler(void *dev) { struct qede_dev *edev = dev; if (edev->state == QEDE_STATE_RECOVERY) { DP_NOTICE(edev, "Avoid scheduling a recovery handling since already in recovery state\n"); return; } set_bit(QEDE_SP_RECOVERY, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); DP_INFO(edev, "Scheduled a recovery handler\n"); } static void qede_recovery_failed(struct qede_dev *edev) { netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n"); netif_device_detach(edev->ndev); if (edev->cdev) edev->ops->common->set_power_state(edev->cdev, PCI_D3hot); } static void qede_recovery_handler(struct qede_dev *edev) { u32 curr_state = edev->state; int rc; DP_NOTICE(edev, "Starting a recovery process\n"); /* No need to acquire first the qede_lock since is done by qede_sp_task * before calling this function. 
*/ edev->state = QEDE_STATE_RECOVERY; edev->ops->common->recovery_prolog(edev->cdev); if (curr_state == QEDE_STATE_OPEN) qede_unload(edev, QEDE_UNLOAD_RECOVERY, true); __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY); rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level, IS_VF(edev), QEDE_PROBE_RECOVERY); if (rc) { edev->cdev = NULL; goto err; } if (curr_state == QEDE_STATE_OPEN) { rc = qede_load(edev, QEDE_LOAD_RECOVERY, true); if (rc) goto err; qede_config_rx_mode(edev->ndev); udp_tunnel_nic_reset_ntf(edev->ndev); } edev->state = curr_state; DP_NOTICE(edev, "Recovery handling is done\n"); return; err: qede_recovery_failed(edev); } static void qede_atomic_hw_err_handler(struct qede_dev *edev) { struct qed_dev *cdev = edev->cdev; DP_NOTICE(edev, "Generic non-sleepable HW error handling started - err_flags 0x%lx\n", edev->err_flags); /* Get a call trace of the flow that led to the error */ WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags)); /* Prevent HW attentions from being reasserted */ if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags)) edev->ops->common->attn_clr_enable(cdev, true); DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n"); } static void qede_generic_hw_err_handler(struct qede_dev *edev) { DP_NOTICE(edev, "Generic sleepable HW error handling started - err_flags 0x%lx\n", edev->err_flags); if (edev->devlink) { DP_NOTICE(edev, "Reporting fatal error to devlink\n"); edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type); } clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags); DP_NOTICE(edev, "Generic sleepable HW error handling is done\n"); } static void qede_set_hw_err_flags(struct qede_dev *edev, enum qed_hw_err_type err_type) { unsigned long err_flags = 0; switch (err_type) { case QED_HW_ERR_DMAE_FAIL: set_bit(QEDE_ERR_WARN, &err_flags); fallthrough; case QED_HW_ERR_MFW_RESP_FAIL: case QED_HW_ERR_HW_ATTN: case QED_HW_ERR_RAMROD_FAIL: case QED_HW_ERR_FW_ASSERT: set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags); set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags); /* make this error as recoverable and start recovery*/ set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags); break; default: DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type); break; } edev->err_flags |= err_flags; } static void qede_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type) { struct qede_dev *edev = dev; /* Fan failure cannot be masked by handling of another HW error or by a * concurrent recovery process. 
*/ if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) || edev->state == QEDE_STATE_RECOVERY) && err_type != QED_HW_ERR_FAN_FAIL) { DP_INFO(edev, "Avoid scheduling an error handling while another HW error is being handled\n"); return; } if (err_type >= QED_HW_ERR_LAST) { DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type); clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags); return; } edev->last_err_type = err_type; qede_set_hw_err_flags(edev, err_type); qede_atomic_hw_err_handler(edev); set_bit(QEDE_SP_HW_ERR, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); DP_INFO(edev, "Scheduled a error handler [err_type %d]\n", err_type); } static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq) { struct netdev_queue *netdev_txq; netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); if (netif_xmit_stopped(netdev_txq)) return true; return false; } static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) { struct qede_dev *edev = dev; struct netdev_hw_addr *ha; int i; if (edev->ndev->features & NETIF_F_IP_CSUM) data->feat_flags |= QED_TLV_IP_CSUM; if (edev->ndev->features & NETIF_F_TSO) data->feat_flags |= QED_TLV_LSO; ether_addr_copy(data->mac[0], edev->ndev->dev_addr); eth_zero_addr(data->mac[1]); eth_zero_addr(data->mac[2]); /* Copy the first two UC macs */ netif_addr_lock_bh(edev->ndev); i = 1; netdev_for_each_uc_addr(ha, edev->ndev) { ether_addr_copy(data->mac[i++], ha->addr); if (i == QED_TLV_MAC_COUNT) break; } netif_addr_unlock_bh(edev->ndev); } static void qede_get_eth_tlv_data(void *dev, void *data) { struct qed_mfw_tlv_eth *etlv = data; struct qede_dev *edev = dev; struct qede_fastpath *fp; int i; etlv->lso_maxoff_size = 0XFFFF; etlv->lso_maxoff_size_set = true; etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN; etlv->lso_minseg_size_set = true; etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC); etlv->prom_mode_set = true; etlv->tx_descr_size = QEDE_TSS_COUNT(edev); etlv->tx_descr_size_set = true; etlv->rx_descr_size = QEDE_RSS_COUNT(edev); etlv->rx_descr_size_set = true; etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB; etlv->iov_offload_set = true; /* Fill information regarding queues; Should be done under the qede * lock to guarantee those don't change beneath our feet. */ etlv->txqs_empty = true; etlv->rxqs_empty = true; etlv->num_txqs_full = 0; etlv->num_rxqs_full = 0; __qede_lock(edev); for_each_queue(i) { fp = &edev->fp_array[i]; if (fp->type & QEDE_FASTPATH_TX) { struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp); if (txq->sw_tx_cons != txq->sw_tx_prod) etlv->txqs_empty = false; if (qede_is_txq_full(edev, txq)) etlv->num_txqs_full++; } if (fp->type & QEDE_FASTPATH_RX) { if (qede_has_rx_work(fp->rxq)) etlv->rxqs_empty = false; /* This one is a bit tricky; Firmware might stop * placing packets if ring is not yet full. * Give an approximation. */ if (le16_to_cpu(*fp->rxq->hw_cons_ptr) - qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) > RX_RING_SIZE - 100) etlv->num_rxqs_full++; } } __qede_unlock(edev); etlv->txqs_empty_set = true; etlv->rxqs_empty_set = true; etlv->num_txqs_full_set = true; etlv->num_rxqs_full_set = true; } /** * qede_io_error_detected(): Called when PCI error is detected * * @pdev: Pointer to PCI device * @state: The current pci connection state * *Return: pci_ers_result_t. * * This function is called after a PCI bus error affecting * this device has been detected. 
*/ static pci_ers_result_t qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *dev = pci_get_drvdata(pdev); struct qede_dev *edev = netdev_priv(dev); if (!edev) return PCI_ERS_RESULT_NONE; DP_NOTICE(edev, "IO error detected [%d]\n", state); __qede_lock(edev); if (edev->state == QEDE_STATE_RECOVERY) { DP_NOTICE(edev, "Device already in the recovery state\n"); __qede_unlock(edev); return PCI_ERS_RESULT_NONE; } /* PF handles the recovery of its VFs */ if (IS_VF(edev)) { DP_VERBOSE(edev, QED_MSG_IOV, "VF recovery is handled by its PF\n"); __qede_unlock(edev); return PCI_ERS_RESULT_RECOVERED; } /* Close OS Tx */ netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); set_bit(QEDE_SP_AER, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); __qede_unlock(edev); return PCI_ERS_RESULT_CAN_RECOVER; }
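/*
 * Editor's illustrative sketch (not part of the upstream driver): how the
 * qede_reload() helper above is meant to be used by configuration paths.
 * Only the ->func callback of struct qede_reload_args is visible in this
 * file; the 'u.mtu' member used below is an assumption added purely for
 * illustration, as is the qede_change_mtu_sketch() wrapper name.  The
 * callback runs between qede_unload() and qede_load() when the interface is
 * up, or on its own when it is down.
 */
#if 0	/* illustrative only - kept out of the build */
static void qede_mtu_reload_cb(struct qede_dev *edev,
			       struct qede_reload_args *args)
{
	/* Queues are torn down at this point (or the device is down), so the
	 * new value simply becomes part of the configuration that
	 * qede_load() will apply when it rebuilds the Rx/Tx queues.
	 */
	edev->ndev->mtu = args->u.mtu;		/* 'u.mtu' is assumed */
}

static int qede_change_mtu_sketch(struct net_device *ndev, int new_mtu)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct qede_reload_args args;

	args.u.mtu = new_mtu;			/* assumed union member */
	args.func = qede_mtu_reload_cb;
	qede_reload(edev, &args, false);	/* takes the qede lock itself */
	return 0;
}
#endif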
linux-master
drivers/net/ethernet/qlogic/qede/qede_main.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <net/dcbnl.h> #include "qede.h" static u8 qede_dcbnl_getstate(struct net_device *netdev) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->getstate(edev->cdev); } static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setstate(edev->cdev, state); } static void qede_dcbnl_getpermhwaddr(struct net_device *netdev, u8 *perm_addr) { memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); } static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map) { struct qede_dev *edev = netdev_priv(netdev); edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type, pgid, bw_pct, up_map); } static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct qede_dev *edev = netdev_priv(netdev); edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct); } static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio, u8 *prio_type, u8 *pgid, u8 *bw_pct, u8 *up_map) { struct qede_dev *edev = netdev_priv(netdev); edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct, up_map); } static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev, int pgid, u8 *bw_pct) { struct qede_dev *edev = netdev_priv(netdev); edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct); } static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio, u8 *setting) { struct qede_dev *edev = netdev_priv(netdev); edev->ops->dcb->getpfccfg(edev->cdev, prio, setting); } static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio, u8 setting) { struct qede_dev *edev = netdev_priv(netdev); edev->ops->dcb->setpfccfg(edev->cdev, prio, setting); } static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->getcap(edev->cdev, capid, cap); } static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num); } static u8 qede_dcbnl_getpfcstate(struct net_device *netdev) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->getpfcstate(edev->cdev); } static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->getapp(edev->cdev, idtype, id); } static u8 qede_dcbnl_getdcbx(struct net_device *netdev) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->getdcbx(edev->cdev); } static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio, u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid, bw_pct, up_map); } static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio, u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid, bw_pct, up_map); } static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid, u8 bw_pct) { struct qede_dev *edev = netdev_priv(netdev); return 
edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct); } static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid, u8 bw_pct) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct); } static u8 qede_dcbnl_setall(struct net_device *netdev) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setall(edev->cdev); } static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num); } static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setpfcstate(edev->cdev, state); } static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval, u8 up) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up); } static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setdcbx(edev->cdev, state); } static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid, u8 *flags) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags); } static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags); } static int qede_dcbnl_peer_getappinfo(struct net_device *netdev, struct dcb_peer_app_info *info, u16 *count) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count); } static int qede_dcbnl_peer_getapptable(struct net_device *netdev, struct dcb_app *app) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->peer_getapptable(edev->cdev, app); } static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev, struct cee_pfc *pfc) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc); } static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg); } static int qede_dcbnl_ieee_getpfc(struct net_device *netdev, struct ieee_pfc *pfc) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc); } static int qede_dcbnl_ieee_setpfc(struct net_device *netdev, struct ieee_pfc *pfc) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc); } static int qede_dcbnl_ieee_getets(struct net_device *netdev, struct ieee_ets *ets) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->ieee_getets(edev->cdev, ets); } static int qede_dcbnl_ieee_setets(struct net_device *netdev, struct ieee_ets *ets) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->ieee_setets(edev->cdev, ets); } static int qede_dcbnl_ieee_getapp(struct net_device *netdev, struct dcb_app *app) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->ieee_getapp(edev->cdev, app); } static int qede_dcbnl_ieee_setapp(struct net_device *netdev, struct dcb_app *app) { struct qede_dev *edev = netdev_priv(netdev); int err; err = dcb_ieee_setapp(netdev, app); if (err) return err; return edev->ops->dcb->ieee_setapp(edev->cdev, app); } static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev, struct 
ieee_pfc *pfc) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc); } static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev, struct ieee_ets *ets) { struct qede_dev *edev = netdev_priv(netdev); return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets); } static const struct dcbnl_rtnl_ops qede_dcbnl_ops = { .ieee_getpfc = qede_dcbnl_ieee_getpfc, .ieee_setpfc = qede_dcbnl_ieee_setpfc, .ieee_getets = qede_dcbnl_ieee_getets, .ieee_setets = qede_dcbnl_ieee_setets, .ieee_getapp = qede_dcbnl_ieee_getapp, .ieee_setapp = qede_dcbnl_ieee_setapp, .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc, .ieee_peer_getets = qede_dcbnl_ieee_peer_getets, .getstate = qede_dcbnl_getstate, .setstate = qede_dcbnl_setstate, .getpermhwaddr = qede_dcbnl_getpermhwaddr, .getpgtccfgtx = qede_dcbnl_getpgtccfgtx, .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx, .getpgtccfgrx = qede_dcbnl_getpgtccfgrx, .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx, .getpfccfg = qede_dcbnl_getpfccfg, .setpfccfg = qede_dcbnl_setpfccfg, .getcap = qede_dcbnl_getcap, .getnumtcs = qede_dcbnl_getnumtcs, .getpfcstate = qede_dcbnl_getpfcstate, .getapp = qede_dcbnl_getapp, .getdcbx = qede_dcbnl_getdcbx, .setpgtccfgtx = qede_dcbnl_setpgtccfgtx, .setpgtccfgrx = qede_dcbnl_setpgtccfgrx, .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx, .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx, .setall = qede_dcbnl_setall, .setnumtcs = qede_dcbnl_setnumtcs, .setpfcstate = qede_dcbnl_setpfcstate, .setapp = qede_dcbnl_setapp, .setdcbx = qede_dcbnl_setdcbx, .setfeatcfg = qede_dcbnl_setfeatcfg, .getfeatcfg = qede_dcbnl_getfeatcfg, .peer_getappinfo = qede_dcbnl_peer_getappinfo, .peer_getapptable = qede_dcbnl_peer_getapptable, .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc, .cee_peer_getpg = qede_dcbnl_cee_peer_getpg, }; void qede_set_dcbnl_ops(struct net_device *dev) { dev->dcbnl_ops = &qede_dcbnl_ops; }
linux-master
drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
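Note on the dcbnl glue above: every callback in qede_dcbnl.c is a thin wrapper that recovers the driver-private structure with netdev_priv() and forwards the call to the qed core through the edev->ops->dcb table. What follows is a minimal, hedged userspace sketch of that delegation pattern; the fake_* types and functions are simplified stand-ins invented for illustration, not the kernel's real struct qede_dev, dcbnl_rtnl_ops, or qed API.

/*
 * Illustrative userspace sketch (not kernel code). It models the pattern used
 * throughout qede_dcbnl.c: each callback recovers the private struct from the
 * net_device and delegates to a lower-level ops table. All names here are
 * simplified stand-ins.
 */
#include <stdio.h>

struct fake_cdev { int dcbx_state; };

/* ops table exposed by the lower-level core, analogous to edev->ops->dcb */
struct fake_dcb_ops {
	unsigned char (*getstate)(struct fake_cdev *cdev);
	unsigned char (*setstate)(struct fake_cdev *cdev, unsigned char state);
};

/* driver-private data, analogous to struct qede_dev */
struct fake_edev {
	struct fake_cdev cdev;
	const struct fake_dcb_ops *dcb;
};

/* stand-in for netdev_priv(): here the "netdev" is just the private struct */
static struct fake_edev *fake_netdev_priv(void *netdev)
{
	return netdev;
}

/* thin wrapper in the style of qede_dcbnl_getstate() */
static unsigned char fake_dcbnl_getstate(void *netdev)
{
	struct fake_edev *edev = fake_netdev_priv(netdev);

	return edev->dcb->getstate(&edev->cdev);
}

static unsigned char core_getstate(struct fake_cdev *cdev)
{
	return cdev->dcbx_state != 0;
}

static unsigned char core_setstate(struct fake_cdev *cdev, unsigned char state)
{
	cdev->dcbx_state = state;
	return 0;
}

static const struct fake_dcb_ops core_ops = {
	.getstate = core_getstate,
	.setstate = core_setstate,
};

int main(void)
{
	struct fake_edev edev = { .cdev = { .dcbx_state = 0 }, .dcb = &core_ops };

	edev.dcb->setstate(&edev.cdev, 1);
	printf("DCB state via wrapper: %u\n", (unsigned)fake_dcbnl_getstate(&edev));
	return 0;
}

The payoff of this shape is that the netdev-facing layer stays stateless: all DCB state lives behind the core ops table, so the wrappers need no locking or bookkeeping of their own.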
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <net/udp_tunnel.h> #include <linux/bitops.h> #include <linux/vmalloc.h> #include <linux/qed/qed_if.h> #include "qede.h" #define QEDE_FILTER_PRINT_MAX_LEN (64) struct qede_arfs_tuple { union { __be32 src_ipv4; struct in6_addr src_ipv6; }; union { __be32 dst_ipv4; struct in6_addr dst_ipv6; }; __be16 src_port; __be16 dst_port; __be16 eth_proto; u8 ip_proto; /* Describe filtering mode needed for this kind of filter */ enum qed_filter_config_mode mode; /* Used to compare new/old filters. Return true if IPs match */ bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b); /* Given an address into ethhdr build a header from tuple info */ void (*build_hdr)(struct qede_arfs_tuple *t, void *header); /* Stringify the tuple for a print into the provided buffer */ void (*stringify)(struct qede_arfs_tuple *t, void *buffer); }; struct qede_arfs_fltr_node { #define QEDE_FLTR_VALID 0 unsigned long state; /* pointer to aRFS packet buffer */ void *data; /* dma map address of aRFS packet buffer */ dma_addr_t mapping; /* length of aRFS packet buffer */ int buf_len; /* tuples to hold from aRFS packet buffer */ struct qede_arfs_tuple tuple; u32 flow_id; u64 sw_id; u16 rxq_id; u16 next_rxq_id; u8 vfid; bool filter_op; bool used; u8 fw_rc; bool b_is_drop; struct hlist_node node; }; struct qede_arfs { #define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx]) #define QEDE_ARFS_POLL_COUNT 100 #define QEDE_RFS_FLW_BITSHIFT (4) #define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1) struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT]; /* lock for filter list access */ spinlock_t arfs_list_lock; unsigned long *arfs_fltr_bmap; int filter_count; /* Currently configured filtering mode */ enum qed_filter_config_mode mode; }; static void qede_configure_arfs_fltr(struct qede_dev *edev, struct qede_arfs_fltr_node *n, u16 rxq_id, bool add_fltr) { const struct qed_eth_ops *op = edev->ops; struct qed_ntuple_filter_params params; if (n->used) return; memset(&params, 0, sizeof(params)); params.addr = n->mapping; params.length = n->buf_len; params.qid = rxq_id; params.b_is_add = add_fltr; params.b_is_drop = n->b_is_drop; if (n->vfid) { params.b_is_vf = true; params.vf_id = n->vfid - 1; } if (n->tuple.stringify) { char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN]; n->tuple.stringify(&n->tuple, tuple_buffer); DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "%s sw_id[0x%llx]: %s [vf %u queue %d]\n", add_fltr ? 
"Adding" : "Deleting", n->sw_id, tuple_buffer, n->vfid, rxq_id); } n->used = true; n->filter_op = add_fltr; op->ntuple_filter_config(edev->cdev, n, &params); } static void qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr) { kfree(fltr->data); if (fltr->sw_id < QEDE_RFS_MAX_FLTR) clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap); kfree(fltr); } static int qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr, u16 bucket_idx) { fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data, fltr->buf_len, DMA_TO_DEVICE); if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) { DP_NOTICE(edev, "Failed to map DMA memory for rule\n"); qede_free_arfs_filter(edev, fltr); return -ENOMEM; } INIT_HLIST_NODE(&fltr->node); hlist_add_head(&fltr->node, QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx)); edev->arfs->filter_count++; if (edev->arfs->filter_count == 1 && edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) { edev->ops->configure_arfs_searcher(edev->cdev, fltr->tuple.mode); edev->arfs->mode = fltr->tuple.mode; } return 0; } static void qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr) { hlist_del(&fltr->node); dma_unmap_single(&edev->pdev->dev, fltr->mapping, fltr->buf_len, DMA_TO_DEVICE); qede_free_arfs_filter(edev, fltr); edev->arfs->filter_count--; if (!edev->arfs->filter_count && edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) { enum qed_filter_config_mode mode; mode = QED_FILTER_CONFIG_MODE_DISABLE; edev->ops->configure_arfs_searcher(edev->cdev, mode); edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE; } } void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc) { struct qede_arfs_fltr_node *fltr = filter; struct qede_dev *edev = dev; fltr->fw_rc = fw_rc; if (fw_rc) { DP_NOTICE(edev, "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n", fw_rc, fltr->flow_id, fltr->sw_id, ntohs(fltr->tuple.src_port), ntohs(fltr->tuple.dst_port), fltr->rxq_id); spin_lock_bh(&edev->arfs->arfs_list_lock); fltr->used = false; clear_bit(QEDE_FLTR_VALID, &fltr->state); spin_unlock_bh(&edev->arfs->arfs_list_lock); return; } spin_lock_bh(&edev->arfs->arfs_list_lock); fltr->used = false; if (fltr->filter_op) { set_bit(QEDE_FLTR_VALID, &fltr->state); if (fltr->rxq_id != fltr->next_rxq_id) qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false); } else { clear_bit(QEDE_FLTR_VALID, &fltr->state); if (fltr->rxq_id != fltr->next_rxq_id) { fltr->rxq_id = fltr->next_rxq_id; qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, true); } } spin_unlock_bh(&edev->arfs->arfs_list_lock); } /* Should be called while qede_lock is held */ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) { int i; for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) { struct hlist_node *temp; struct hlist_head *head; struct qede_arfs_fltr_node *fltr; head = &edev->arfs->arfs_hl_head[i]; hlist_for_each_entry_safe(fltr, temp, head, node) { bool del = false; if (edev->state != QEDE_STATE_OPEN) del = true; spin_lock_bh(&edev->arfs->arfs_list_lock); if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) && !fltr->used) || free_fltr) { qede_dequeue_fltr_and_config_searcher(edev, fltr); } else { bool flow_exp = false; #ifdef CONFIG_RFS_ACCEL flow_exp = rps_may_expire_flow(edev->ndev, fltr->rxq_id, fltr->flow_id, fltr->sw_id); #endif if ((flow_exp || del) && !free_fltr) qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false); } spin_unlock_bh(&edev->arfs->arfs_list_lock); } } #ifdef 
CONFIG_RFS_ACCEL spin_lock_bh(&edev->arfs->arfs_list_lock); if (edev->arfs->filter_count) { set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, QEDE_SP_TASK_POLL_DELAY); } spin_unlock_bh(&edev->arfs->arfs_list_lock); #endif } /* This function waits until all aRFS filters get deleted and freed. * On timeout it frees all filters forcefully. */ void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev) { int count = QEDE_ARFS_POLL_COUNT; while (count) { qede_process_arfs_filters(edev, false); if (!edev->arfs->filter_count) break; msleep(100); count--; } if (!count) { DP_NOTICE(edev, "Timeout in polling for arfs filter free\n"); /* Something is terribly wrong, free forcefully */ qede_process_arfs_filters(edev, true); } } int qede_alloc_arfs(struct qede_dev *edev) { int i; if (!edev->dev_info.common.b_arfs_capable) return -EINVAL; edev->arfs = vzalloc(sizeof(*edev->arfs)); if (!edev->arfs) return -ENOMEM; spin_lock_init(&edev->arfs->arfs_list_lock); for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i)); edev->arfs->arfs_fltr_bmap = vzalloc(array_size(sizeof(long), BITS_TO_LONGS(QEDE_RFS_MAX_FLTR))); if (!edev->arfs->arfs_fltr_bmap) { vfree(edev->arfs); edev->arfs = NULL; return -ENOMEM; } #ifdef CONFIG_RFS_ACCEL edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev)); if (!edev->ndev->rx_cpu_rmap) { vfree(edev->arfs->arfs_fltr_bmap); edev->arfs->arfs_fltr_bmap = NULL; vfree(edev->arfs); edev->arfs = NULL; return -ENOMEM; } #endif return 0; } void qede_free_arfs(struct qede_dev *edev) { if (!edev->arfs) return; #ifdef CONFIG_RFS_ACCEL if (edev->ndev->rx_cpu_rmap) free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); edev->ndev->rx_cpu_rmap = NULL; #endif vfree(edev->arfs->arfs_fltr_bmap); edev->arfs->arfs_fltr_bmap = NULL; vfree(edev->arfs); edev->arfs = NULL; } #ifdef CONFIG_RFS_ACCEL static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos, const struct sk_buff *skb) { if (skb->protocol == htons(ETH_P_IP)) { if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr && tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr) return true; else return false; } else { struct in6_addr *src = &tpos->tuple.src_ipv6; u8 size = sizeof(struct in6_addr); if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) && !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size)) return true; else return false; } } static struct qede_arfs_fltr_node * qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb, __be16 src_port, __be16 dst_port, u8 ip_proto) { struct qede_arfs_fltr_node *tpos; hlist_for_each_entry(tpos, h, node) if (tpos->tuple.ip_proto == ip_proto && tpos->tuple.eth_proto == skb->protocol && qede_compare_ip_addr(tpos, skb) && tpos->tuple.src_port == src_port && tpos->tuple.dst_port == dst_port) return tpos; return NULL; } static struct qede_arfs_fltr_node * qede_alloc_filter(struct qede_dev *edev, int min_hlen) { struct qede_arfs_fltr_node *n; int bit_id; bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap, QEDE_RFS_MAX_FLTR); if (bit_id >= QEDE_RFS_MAX_FLTR) return NULL; n = kzalloc(sizeof(*n), GFP_ATOMIC); if (!n) return NULL; n->data = kzalloc(min_hlen, GFP_ATOMIC); if (!n->data) { kfree(n); return NULL; } n->sw_id = (u16)bit_id; set_bit(bit_id, edev->arfs->arfs_fltr_bmap); return n; } int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id) { struct qede_dev *edev = netdev_priv(dev); struct qede_arfs_fltr_node *n; int min_hlen, rc, tp_offset; struct ethhdr *eth; __be16 *ports; u16 
tbl_idx; u8 ip_proto; if (skb->encapsulation) return -EPROTONOSUPPORT; if (skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_IPV6)) return -EPROTONOSUPPORT; if (skb->protocol == htons(ETH_P_IP)) { ip_proto = ip_hdr(skb)->protocol; tp_offset = sizeof(struct iphdr); } else { ip_proto = ipv6_hdr(skb)->nexthdr; tp_offset = sizeof(struct ipv6hdr); } if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) return -EPROTONOSUPPORT; ports = (__be16 *)(skb->data + tp_offset); tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK; spin_lock_bh(&edev->arfs->arfs_list_lock); n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx), skb, ports[0], ports[1], ip_proto); if (n) { /* Filter match */ n->next_rxq_id = rxq_index; if (test_bit(QEDE_FLTR_VALID, &n->state)) { if (n->rxq_id != rxq_index) qede_configure_arfs_fltr(edev, n, n->rxq_id, false); } else { if (!n->used) { n->rxq_id = rxq_index; qede_configure_arfs_fltr(edev, n, n->rxq_id, true); } } rc = n->sw_id; goto ret_unlock; } min_hlen = ETH_HLEN + skb_headlen(skb); n = qede_alloc_filter(edev, min_hlen); if (!n) { rc = -ENOMEM; goto ret_unlock; } n->buf_len = min_hlen; n->rxq_id = rxq_index; n->next_rxq_id = rxq_index; n->tuple.src_port = ports[0]; n->tuple.dst_port = ports[1]; n->flow_id = flow_id; if (skb->protocol == htons(ETH_P_IP)) { n->tuple.src_ipv4 = ip_hdr(skb)->saddr; n->tuple.dst_ipv4 = ip_hdr(skb)->daddr; } else { memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr, sizeof(struct in6_addr)); memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, sizeof(struct in6_addr)); } eth = (struct ethhdr *)n->data; eth->h_proto = skb->protocol; n->tuple.eth_proto = skb->protocol; n->tuple.ip_proto = ip_proto; n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE; memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb)); rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx); if (rc) goto ret_unlock; qede_configure_arfs_fltr(edev, n, n->rxq_id, true); spin_unlock_bh(&edev->arfs->arfs_list_lock); set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); return n->sw_id; ret_unlock: spin_unlock_bh(&edev->arfs->arfs_list_lock); return rc; } #endif void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port) { struct qede_dev *edev = dev; if (edev->vxlan_dst_port != vxlan_port) edev->vxlan_dst_port = 0; if (edev->geneve_dst_port != geneve_port) edev->geneve_dst_port = 0; } void qede_force_mac(void *dev, u8 *mac, bool forced) { struct qede_dev *edev = dev; __qede_lock(edev); if (!is_valid_ether_addr(mac)) { __qede_unlock(edev); return; } eth_hw_addr_set(edev->ndev, mac); __qede_unlock(edev); } void qede_fill_rss_params(struct qede_dev *edev, struct qed_update_vport_rss_params *rss, u8 *update) { bool need_reset = false; int i; if (QEDE_RSS_COUNT(edev) <= 1) { memset(rss, 0, sizeof(*rss)); *update = 0; return; } /* Need to validate current RSS config uses valid entries */ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) { need_reset = true; break; } } if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) { for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { u16 indir_val, val; val = QEDE_RSS_COUNT(edev); indir_val = ethtool_rxfh_indir_default(i, val); edev->rss_ind_table[i] = indir_val; } edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; } /* Now that we have the queue-indirection, prepare the handles */ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]); rss->rss_ind_table[i] = 
edev->fp_array[idx].rxq->handle; } if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) { netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key)); edev->rss_params_inited |= QEDE_RSS_KEY_INITED; } memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key)); if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) { edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 | QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP; edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; } rss->rss_caps = edev->rss_caps; *update = 1; } static int qede_set_ucast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, const unsigned char mac[ETH_ALEN]) { struct qed_filter_ucast_params ucast; memset(&ucast, 0, sizeof(ucast)); ucast.type = opcode; ucast.mac_valid = 1; ether_addr_copy(ucast.mac, mac); return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_set_ucast_rx_vlan(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, u16 vid) { struct qed_filter_ucast_params ucast; memset(&ucast, 0, sizeof(ucast)); ucast.type = opcode; ucast.vlan_valid = 1; ucast.vlan = vid; return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action) { struct qed_update_vport_params *params; int rc; /* Proceed only if action actually needs to be performed */ if (edev->accept_any_vlan == action) return 0; params = vzalloc(sizeof(*params)); if (!params) return -ENOMEM; params->vport_id = 0; params->accept_any_vlan = action; params->update_accept_any_vlan_flg = 1; rc = edev->ops->vport_update(edev->cdev, params); if (rc) { DP_ERR(edev, "Failed to %s accept-any-vlan\n", action ? "enable" : "disable"); } else { DP_INFO(edev, "%s accept-any-vlan\n", action ? "enabled" : "disabled"); edev->accept_any_vlan = action; } vfree(params); return 0; } int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct qede_dev *edev = netdev_priv(dev); struct qede_vlan *vlan, *tmp; int rc = 0; DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); if (!vlan) { DP_INFO(edev, "Failed to allocate struct for vlan\n"); return -ENOMEM; } INIT_LIST_HEAD(&vlan->list); vlan->vid = vid; vlan->configured = false; /* Verify vlan isn't already configured */ list_for_each_entry(tmp, &edev->vlan_list, list) { if (tmp->vid == vlan->vid) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "vlan already configured\n"); kfree(vlan); return -EEXIST; } } /* If interface is down, cache this VLAN ID and return */ __qede_lock(edev); if (edev->state != QEDE_STATE_OPEN) { DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Interface is down, VLAN %d will be configured when interface is up\n", vid); if (vid != 0) edev->non_configured_vlans++; list_add(&vlan->list, &edev->vlan_list); goto out; } /* Check for the filter limit. 
* Note - vlan0 has a reserved filter and can be added without * worrying about quota */ if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || (vlan->vid == 0)) { rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, vlan->vid); if (rc) { DP_ERR(edev, "Failed to configure VLAN %d\n", vlan->vid); kfree(vlan); goto out; } vlan->configured = true; /* vlan0 filter isn't consuming out of our quota */ if (vlan->vid != 0) edev->configured_vlans++; } else { /* Out of quota; Activate accept-any-VLAN mode */ if (!edev->non_configured_vlans) { rc = qede_config_accept_any_vlan(edev, true); if (rc) { kfree(vlan); goto out; } } edev->non_configured_vlans++; } list_add(&vlan->list, &edev->vlan_list); out: __qede_unlock(edev); return rc; } static void qede_del_vlan_from_list(struct qede_dev *edev, struct qede_vlan *vlan) { /* vlan0 filter isn't consuming out of our quota */ if (vlan->vid != 0) { if (vlan->configured) edev->configured_vlans--; else edev->non_configured_vlans--; } list_del(&vlan->list); kfree(vlan); } int qede_configure_vlan_filters(struct qede_dev *edev) { int rc = 0, real_rc = 0, accept_any_vlan = 0; struct qed_dev_eth_info *dev_info; struct qede_vlan *vlan = NULL; if (list_empty(&edev->vlan_list)) return 0; dev_info = &edev->dev_info; /* Configure non-configured vlans */ list_for_each_entry(vlan, &edev->vlan_list, list) { if (vlan->configured) continue; /* We have used all our credits, now enable accept_any_vlan */ if ((vlan->vid != 0) && (edev->configured_vlans == dev_info->num_vlan_filters)) { accept_any_vlan = 1; continue; } DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, vlan->vid); if (rc) { DP_ERR(edev, "Failed to configure VLAN %u\n", vlan->vid); real_rc = rc; continue; } vlan->configured = true; /* vlan0 filter doesn't consume our VLAN filter's quota */ if (vlan->vid != 0) { edev->non_configured_vlans--; edev->configured_vlans++; } } /* enable accept_any_vlan mode if we have more VLANs than credits, * or remove accept_any_vlan mode if we've actually removed * a non-configured vlan, and all remaining vlans are truly configured. */ if (accept_any_vlan) rc = qede_config_accept_any_vlan(edev, true); else if (!edev->non_configured_vlans) rc = qede_config_accept_any_vlan(edev, false); if (rc && !real_rc) real_rc = rc; return real_rc; } int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct qede_dev *edev = netdev_priv(dev); struct qede_vlan *vlan; int rc = 0; DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); /* Find whether entry exists */ __qede_lock(edev); list_for_each_entry(vlan, &edev->vlan_list, list) if (vlan->vid == vid) break; if (list_entry_is_head(vlan, &edev->vlan_list, list)) { DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), "Vlan isn't configured\n"); goto out; } if (edev->state != QEDE_STATE_OPEN) { /* As interface is already down, we don't have a VPORT * instance to remove vlan filter. So just update vlan list */ DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Interface is down, removing VLAN from list only\n"); qede_del_vlan_from_list(edev, vlan); goto out; } /* Remove vlan */ if (vlan->configured) { rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid); if (rc) { DP_ERR(edev, "Failed to remove VLAN %d\n", vid); goto out; } } qede_del_vlan_from_list(edev, vlan); /* We have removed a VLAN - try to see if we can * configure non-configured VLAN from the list. 
*/ rc = qede_configure_vlan_filters(edev); out: __qede_unlock(edev); return rc; } void qede_vlan_mark_nonconfigured(struct qede_dev *edev) { struct qede_vlan *vlan = NULL; if (list_empty(&edev->vlan_list)) return; list_for_each_entry(vlan, &edev->vlan_list, list) { if (!vlan->configured) continue; vlan->configured = false; /* vlan0 filter isn't consuming out of our quota */ if (vlan->vid != 0) { edev->non_configured_vlans++; edev->configured_vlans--; } DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "marked vlan %d as non-configured\n", vlan->vid); } edev->accept_any_vlan = false; } static void qede_set_features_reload(struct qede_dev *edev, struct qede_reload_args *args) { edev->ndev->features = args->u.features; } netdev_features_t qede_fix_features(struct net_device *dev, netdev_features_t features) { struct qede_dev *edev = netdev_priv(dev); if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE || !(features & NETIF_F_GRO)) features &= ~NETIF_F_GRO_HW; return features; } int qede_set_features(struct net_device *dev, netdev_features_t features) { struct qede_dev *edev = netdev_priv(dev); netdev_features_t changes = features ^ dev->features; bool need_reload = false; if (changes & NETIF_F_GRO_HW) need_reload = true; if (need_reload) { struct qede_reload_args args; args.u.features = features; args.func = &qede_set_features_reload; /* Make sure that we definitely need to reload. * In case of an eBPF attached program, there will be no FW * aggregations, so no need to actually reload. */ __qede_lock(edev); if (edev->xdp_prog) args.func(edev, &args); else qede_reload(edev, &args, true); __qede_unlock(edev); return 1; } return 0; } static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table) { struct qede_dev *edev = netdev_priv(dev); struct qed_tunn_params tunn_params; struct udp_tunnel_info ti; u16 *save_port; int rc; memset(&tunn_params, 0, sizeof(tunn_params)); udp_tunnel_nic_get_port(dev, table, 0, &ti); if (ti.type == UDP_TUNNEL_TYPE_VXLAN) { tunn_params.update_vxlan_port = 1; tunn_params.vxlan_port = ntohs(ti.port); save_port = &edev->vxlan_dst_port; } else { tunn_params.update_geneve_port = 1; tunn_params.geneve_port = ntohs(ti.port); save_port = &edev->geneve_dst_port; } __qede_lock(edev); rc = edev->ops->tunn_config(edev->cdev, &tunn_params); __qede_unlock(edev); if (rc) return rc; *save_port = ntohs(ti.port); return 0; } static const struct udp_tunnel_nic_info qede_udp_tunnels_both = { .sync_table = qede_udp_tunnel_sync, .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, }, }, qede_udp_tunnels_vxlan = { .sync_table = qede_udp_tunnel_sync, .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, }, }, qede_udp_tunnels_geneve = { .sync_table = qede_udp_tunnel_sync, .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, .tables = { { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, }, }; void qede_set_udp_tunnels(struct qede_dev *edev) { if (edev->dev_info.common.vxlan_enable && edev->dev_info.common.geneve_enable) edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_both; else if (edev->dev_info.common.vxlan_enable) edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_vxlan; else if (edev->dev_info.common.geneve_enable) edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_geneve; } static void qede_xdp_reload_func(struct qede_dev *edev, struct qede_reload_args *args) { struct bpf_prog *old; old = xchg(&edev->xdp_prog, 
args->u.new_prog); if (old) bpf_prog_put(old); } static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) { struct qede_reload_args args; /* If we're called, there was already a bpf reference increment */ args.func = &qede_xdp_reload_func; args.u.new_prog = prog; qede_reload(edev, &args, false); return 0; } int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct qede_dev *edev = netdev_priv(dev); switch (xdp->command) { case XDP_SETUP_PROG: return qede_xdp_set(edev, xdp->prog); default: return -EINVAL; } } static int qede_set_mcast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, unsigned char *mac, int num_macs) { struct qed_filter_mcast_params mcast; int i; memset(&mcast, 0, sizeof(mcast)); mcast.type = opcode; mcast.num = num_macs; for (i = 0; i < num_macs; i++, mac += ETH_ALEN) ether_addr_copy(mcast.mac[i], mac); return edev->ops->filter_config_mcast(edev->cdev, &mcast); } int qede_set_mac_addr(struct net_device *ndev, void *p) { struct qede_dev *edev = netdev_priv(ndev); struct sockaddr *addr = p; int rc = 0; /* Make sure the state doesn't transition while changing the MAC. * Also, all flows accessing the dev_addr field are doing that under * this lock. */ __qede_lock(edev); if (!is_valid_ether_addr(addr->sa_data)) { DP_NOTICE(edev, "The MAC address is not valid\n"); rc = -EFAULT; goto out; } if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) { DP_NOTICE(edev, "qed prevents setting MAC %pM\n", addr->sa_data); rc = -EINVAL; goto out; } if (edev->state == QEDE_STATE_OPEN) { /* Remove the previous primary mac */ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, ndev->dev_addr); if (rc) goto out; } eth_hw_addr_set(ndev, addr->sa_data); DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data); if (edev->state != QEDE_STATE_OPEN) { DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "The device is currently down\n"); /* Ask PF to explicitly update a copy in bulletin board */ if (IS_VF(edev) && edev->ops->req_bulletin_update_mac) edev->ops->req_bulletin_update_mac(edev->cdev, ndev->dev_addr); goto out; } edev->ops->common->update_mac(edev->cdev, ndev->dev_addr); rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, ndev->dev_addr); out: __qede_unlock(edev); return rc; } static int qede_configure_mcast_filtering(struct net_device *ndev, enum qed_filter_rx_mode_type *accept_flags) { struct qede_dev *edev = netdev_priv(ndev); unsigned char *mc_macs, *temp; struct netdev_hw_addr *ha; int rc = 0, mc_count; size_t size; size = 64 * ETH_ALEN; mc_macs = kzalloc(size, GFP_KERNEL); if (!mc_macs) { DP_NOTICE(edev, "Failed to allocate memory for multicast MACs\n"); rc = -ENOMEM; goto exit; } temp = mc_macs; /* Remove all previously configured MAC filters */ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, mc_macs, 1); if (rc) goto exit; netif_addr_lock_bh(ndev); mc_count = netdev_mc_count(ndev); if (mc_count <= 64) { netdev_for_each_mc_addr(ha, ndev) { ether_addr_copy(temp, ha->addr); temp += ETH_ALEN; } } netif_addr_unlock_bh(ndev); /* Check for all multicast @@@TBD resource allocation */ if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) { if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR) *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; } else { /* Add all multicast MAC filters */ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, mc_macs, mc_count); } exit: kfree(mc_macs); return rc; } void qede_set_rx_mode(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); set_bit(QEDE_SP_RX_MODE, 
&edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); } /* Must be called with qede_lock held */ void qede_config_rx_mode(struct net_device *ndev) { enum qed_filter_rx_mode_type accept_flags; struct qede_dev *edev = netdev_priv(ndev); unsigned char *uc_macs, *temp; struct netdev_hw_addr *ha; int rc, uc_count; size_t size; netif_addr_lock_bh(ndev); uc_count = netdev_uc_count(ndev); size = uc_count * ETH_ALEN; uc_macs = kzalloc(size, GFP_ATOMIC); if (!uc_macs) { DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n"); netif_addr_unlock_bh(ndev); return; } temp = uc_macs; netdev_for_each_uc_addr(ha, ndev) { ether_addr_copy(temp, ha->addr); temp += ETH_ALEN; } netif_addr_unlock_bh(ndev); /* Remove all previous unicast secondary macs and multicast macs * (configure / leave the primary mac) */ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE, edev->ndev->dev_addr); if (rc) goto out; /* Check for promiscuous */ if (ndev->flags & IFF_PROMISC) accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; else accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR; /* Configure all filters regardless, in case promisc is rejected */ if (uc_count < edev->dev_info.num_mac_filters) { int i; temp = uc_macs; for (i = 0; i < uc_count; i++) { rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, temp); if (rc) goto out; temp += ETH_ALEN; } } else { accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; } rc = qede_configure_mcast_filtering(ndev, &accept_flags); if (rc) goto out; /* take care of VLAN mode */ if (ndev->flags & IFF_PROMISC) { qede_config_accept_any_vlan(edev, true); } else if (!edev->non_configured_vlans) { /* It's possible that accept_any_vlan mode is set due to a * previous setting of IFF_PROMISC. If vlan credits are * sufficient, disable accept_any_vlan. 
*/ qede_config_accept_any_vlan(edev, false); } edev->ops->filter_config_rx_mode(edev->cdev, accept_flags); out: kfree(uc_macs); } static struct qede_arfs_fltr_node * qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location) { struct qede_arfs_fltr_node *fltr; hlist_for_each_entry(fltr, head, node) if (location == fltr->sw_id) return fltr; return NULL; } int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, u32 *rule_locs) { struct qede_arfs_fltr_node *fltr; struct hlist_head *head; int cnt = 0, rc = 0; info->data = QEDE_RFS_MAX_FLTR; __qede_lock(edev); if (!edev->arfs) { rc = -EPERM; goto unlock; } head = QEDE_ARFS_BUCKET_HEAD(edev, 0); hlist_for_each_entry(fltr, head, node) { if (cnt == info->rule_cnt) { rc = -EMSGSIZE; goto unlock; } rule_locs[cnt] = fltr->sw_id; cnt++; } info->rule_cnt = cnt; unlock: __qede_unlock(edev); return rc; } int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd) { struct ethtool_rx_flow_spec *fsp = &cmd->fs; struct qede_arfs_fltr_node *fltr = NULL; int rc = 0; cmd->data = QEDE_RFS_MAX_FLTR; __qede_lock(edev); if (!edev->arfs) { rc = -EPERM; goto unlock; } fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), fsp->location); if (!fltr) { DP_NOTICE(edev, "Rule not found - location=0x%x\n", fsp->location); rc = -EINVAL; goto unlock; } if (fltr->tuple.eth_proto == htons(ETH_P_IP)) { if (fltr->tuple.ip_proto == IPPROTO_TCP) fsp->flow_type = TCP_V4_FLOW; else fsp->flow_type = UDP_V4_FLOW; fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port; fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port; fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4; fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4; } else { if (fltr->tuple.ip_proto == IPPROTO_TCP) fsp->flow_type = TCP_V6_FLOW; else fsp->flow_type = UDP_V6_FLOW; fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port; fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port; memcpy(&fsp->h_u.tcp_ip6_spec.ip6src, &fltr->tuple.src_ipv6, sizeof(struct in6_addr)); memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst, &fltr->tuple.dst_ipv6, sizeof(struct in6_addr)); } fsp->ring_cookie = fltr->rxq_id; if (fltr->vfid) { fsp->ring_cookie |= ((u64)fltr->vfid) << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; } if (fltr->b_is_drop) fsp->ring_cookie = RX_CLS_FLOW_DISC; unlock: __qede_unlock(edev); return rc; } static int qede_poll_arfs_filter_config(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr) { int count = QEDE_ARFS_POLL_COUNT; while (fltr->used && count) { msleep(20); count--; } if (count == 0 || fltr->fw_rc) { DP_NOTICE(edev, "Timeout in polling filter config\n"); qede_dequeue_fltr_and_config_searcher(edev, fltr); return -EIO; } return fltr->fw_rc; } static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t) { int size = ETH_HLEN; if (t->eth_proto == htons(ETH_P_IP)) size += sizeof(struct iphdr); else size += sizeof(struct ipv6hdr); if (t->ip_proto == IPPROTO_TCP) size += sizeof(struct tcphdr); else size += sizeof(struct udphdr); return size; } static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b) { if (a->eth_proto != htons(ETH_P_IP) || b->eth_proto != htons(ETH_P_IP)) return false; return (a->src_ipv4 == b->src_ipv4) && (a->dst_ipv4 == b->dst_ipv4); } static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t, void *header) { __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr)); struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN); struct ethhdr *eth = (struct ethhdr *)header; eth->h_proto = t->eth_proto; ip->saddr = t->src_ipv4; 
ip->daddr = t->dst_ipv4; ip->version = 0x4; ip->ihl = 0x5; ip->protocol = t->ip_proto; ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN); /* ports is weakly typed to suit both TCP and UDP ports */ ports[0] = t->src_port; ports[1] = t->dst_port; } static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t, void *buffer) { const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP"; snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN, "%s %pI4 (%04x) -> %pI4 (%04x)", prefix, &t->src_ipv4, t->src_port, &t->dst_ipv4, t->dst_port); } static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b) { if (a->eth_proto != htons(ETH_P_IPV6) || b->eth_proto != htons(ETH_P_IPV6)) return false; if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr))) return false; if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr))) return false; return true; } static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t, void *header) { __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr)); struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN); struct ethhdr *eth = (struct ethhdr *)header; eth->h_proto = t->eth_proto; memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr)); memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr)); ip6->version = 0x6; if (t->ip_proto == IPPROTO_TCP) { ip6->nexthdr = NEXTHDR_TCP; ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr)); } else { ip6->nexthdr = NEXTHDR_UDP; ip6->payload_len = cpu_to_be16(sizeof(struct udphdr)); } /* ports is weakly typed to suit both TCP and UDP ports */ ports[0] = t->src_port; ports[1] = t->dst_port; } /* Validate fields which are set and not accepted by the driver */ static int qede_flow_spec_validate_unused(struct qede_dev *edev, struct ethtool_rx_flow_spec *fs) { if (fs->flow_type & FLOW_MAC_EXT) { DP_INFO(edev, "Don't support MAC extensions\n"); return -EOPNOTSUPP; } if ((fs->flow_type & FLOW_EXT) && (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) { DP_INFO(edev, "Don't support vlan-based classification\n"); return -EOPNOTSUPP; } if ((fs->flow_type & FLOW_EXT) && (fs->h_ext.data[0] || fs->h_ext.data[1])) { DP_INFO(edev, "Don't support user defined data\n"); return -EOPNOTSUPP; } return 0; } static int qede_set_v4_tuple_to_profile(struct qede_dev *edev, struct qede_arfs_tuple *t) { /* We must have Only 4-tuples/l4 port/src ip/dst ip * as an input. */ if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) { t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; } else if (!t->src_port && t->dst_port && !t->src_ipv4 && !t->dst_ipv4) { t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; } else if (!t->src_port && !t->dst_port && !t->dst_ipv4 && t->src_ipv4) { t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; } else if (!t->src_port && !t->dst_port && t->dst_ipv4 && !t->src_ipv4) { t->mode = QED_FILTER_CONFIG_MODE_IP_DEST; } else { DP_INFO(edev, "Invalid N-tuple\n"); return -EOPNOTSUPP; } t->ip_comp = qede_flow_spec_ipv4_cmp; t->build_hdr = qede_flow_build_ipv4_hdr; t->stringify = qede_flow_stringify_ipv4_hdr; return 0; } static int qede_set_v6_tuple_to_profile(struct qede_dev *edev, struct qede_arfs_tuple *t, struct in6_addr *zaddr) { /* We must have Only 4-tuples/l4 port/src ip/dst ip * as an input. 
*/ if (t->src_port && t->dst_port && memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) && memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) { t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; } else if (!t->src_port && t->dst_port && !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) && !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) { t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; } else if (!t->src_port && !t->dst_port && !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) && memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) { t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; } else if (!t->src_port && !t->dst_port && memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) && !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) { t->mode = QED_FILTER_CONFIG_MODE_IP_DEST; } else { DP_INFO(edev, "Invalid N-tuple\n"); return -EOPNOTSUPP; } t->ip_comp = qede_flow_spec_ipv6_cmp; t->build_hdr = qede_flow_build_ipv6_hdr; return 0; } /* Must be called while qede lock is held */ static struct qede_arfs_fltr_node * qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t) { struct qede_arfs_fltr_node *fltr; struct hlist_node *temp; struct hlist_head *head; head = QEDE_ARFS_BUCKET_HEAD(edev, 0); hlist_for_each_entry_safe(fltr, temp, head, node) { if (fltr->tuple.ip_proto == t->ip_proto && fltr->tuple.src_port == t->src_port && fltr->tuple.dst_port == t->dst_port && t->ip_comp(&fltr->tuple, t)) return fltr; } return NULL; } static void qede_flow_set_destination(struct qede_dev *edev, struct qede_arfs_fltr_node *n, struct ethtool_rx_flow_spec *fs) { if (fs->ring_cookie == RX_CLS_FLOW_DISC) { n->b_is_drop = true; return; } n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie); n->next_rxq_id = n->rxq_id; if (n->vfid) DP_VERBOSE(edev, QED_MSG_SP, "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1); } int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie) { struct qede_arfs_fltr_node *fltr = NULL; int rc = -EPERM; __qede_lock(edev); if (!edev->arfs) goto unlock; fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), cookie); if (!fltr) goto unlock; qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false); rc = qede_poll_arfs_filter_config(edev, fltr); if (rc == 0) qede_dequeue_fltr_and_config_searcher(edev, fltr); unlock: __qede_unlock(edev); return rc; } int qede_get_arfs_filter_count(struct qede_dev *edev) { int count = 0; __qede_lock(edev); if (!edev->arfs) goto unlock; count = edev->arfs->filter_count; unlock: __qede_unlock(edev); return count; } static int qede_parse_actions(struct qede_dev *edev, struct flow_action *flow_action, struct netlink_ext_ack *extack) { const struct flow_action_entry *act; int i; if (!flow_action_has_entries(flow_action)) { DP_NOTICE(edev, "No actions received\n"); return -EINVAL; } if (!flow_action_basic_hw_stats_check(flow_action, extack)) return -EOPNOTSUPP; flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_DROP: break; case FLOW_ACTION_QUEUE: if (act->queue.vf) break; if (act->queue.index >= QEDE_RSS_COUNT(edev)) { DP_INFO(edev, "Queue out-of-bounds\n"); return -EINVAL; } break; default: return -EINVAL; } } return 0; } static int qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *t) { if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; flow_rule_match_ports(rule, &match); if ((match.key->src && match.mask->src != htons(U16_MAX)) || (match.key->dst && match.mask->dst != 
htons(U16_MAX))) { DP_NOTICE(edev, "Do not support ports masks\n"); return -EINVAL; } t->src_port = match.key->src; t->dst_port = match.key->dst; } return 0; } static int qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *t) { struct in6_addr zero_addr, addr; memset(&zero_addr, 0, sizeof(addr)); memset(&addr, 0xff, sizeof(addr)); if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { struct flow_match_ipv6_addrs match; flow_rule_match_ipv6_addrs(rule, &match); if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) && memcmp(&match.mask->src, &addr, sizeof(addr))) || (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) && memcmp(&match.mask->dst, &addr, sizeof(addr)))) { DP_NOTICE(edev, "Do not support IPv6 address prefix/mask\n"); return -EINVAL; } memcpy(&t->src_ipv6, &match.key->src, sizeof(addr)); memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr)); } if (qede_flow_parse_ports(edev, rule, t)) return -EINVAL; return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); } static int qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *t) { if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); if ((match.key->src && match.mask->src != htonl(U32_MAX)) || (match.key->dst && match.mask->dst != htonl(U32_MAX))) { DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n"); return -EINVAL; } t->src_ipv4 = match.key->src; t->dst_ipv4 = match.key->dst; } if (qede_flow_parse_ports(edev, rule, t)) return -EINVAL; return qede_set_v4_tuple_to_profile(edev, t); } static int qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IPV6); return qede_flow_parse_v6_common(edev, rule, tuple); } static int qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IP); return qede_flow_parse_v4_common(edev, rule, tuple); } static int qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IPV6); return qede_flow_parse_v6_common(edev, rule, tuple); } static int qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IP); return qede_flow_parse_v4_common(edev, rule, tuple); } static int qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { struct flow_dissector *dissector = rule->match.dissector; int rc = -EINVAL; u8 ip_proto = 0; memset(tuple, 0, sizeof(*tuple)); if (dissector->used_keys & ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { DP_NOTICE(edev, "Unsupported key set:0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) { DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto); return -EPROTONOSUPPORT; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; flow_rule_match_basic(rule, &match); ip_proto = match.key->ip_proto; } if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP)) rc = 
qede_flow_parse_tcp_v4(edev, rule, tuple); else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6)) rc = qede_flow_parse_tcp_v6(edev, rule, tuple); else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP)) rc = qede_flow_parse_udp_v4(edev, rule, tuple); else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6)) rc = qede_flow_parse_udp_v6(edev, rule, tuple); else DP_NOTICE(edev, "Invalid protocol request\n"); return rc; } int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, struct flow_cls_offload *f) { struct qede_arfs_fltr_node *n; int min_hlen, rc = -EINVAL; struct qede_arfs_tuple t; __qede_lock(edev); if (!edev->arfs) { rc = -EPERM; goto unlock; } /* parse flower attribute and prepare filter */ if (qede_parse_flow_attr(edev, proto, f->rule, &t)) goto unlock; /* Validate profile mode and number of filters */ if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) || edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) { DP_NOTICE(edev, "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n", t.mode, edev->arfs->mode, edev->arfs->filter_count); goto unlock; } /* parse tc actions and get the vf_id */ if (qede_parse_actions(edev, &f->rule->action, f->common.extack)) goto unlock; if (qede_flow_find_fltr(edev, &t)) { rc = -EEXIST; goto unlock; } n = kzalloc(sizeof(*n), GFP_KERNEL); if (!n) { rc = -ENOMEM; goto unlock; } min_hlen = qede_flow_get_min_header_size(&t); n->data = kzalloc(min_hlen, GFP_KERNEL); if (!n->data) { kfree(n); rc = -ENOMEM; goto unlock; } memcpy(&n->tuple, &t, sizeof(n->tuple)); n->buf_len = min_hlen; n->b_is_drop = true; n->sw_id = f->cookie; n->tuple.build_hdr(&n->tuple, n->data); rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); if (rc) goto unlock; qede_configure_arfs_fltr(edev, n, n->rxq_id, true); rc = qede_poll_arfs_filter_config(edev, n); unlock: __qede_unlock(edev); return rc; } static int qede_flow_spec_validate(struct qede_dev *edev, struct flow_action *flow_action, struct qede_arfs_tuple *t, __u32 location) { if (location >= QEDE_RFS_MAX_FLTR) { DP_INFO(edev, "Location out-of-bounds\n"); return -EINVAL; } /* Check location isn't already in use */ if (test_bit(location, edev->arfs->arfs_fltr_bmap)) { DP_INFO(edev, "Location already in use\n"); return -EINVAL; } /* Check if the filtering-mode could support the filter */ if (edev->arfs->filter_count && edev->arfs->mode != t->mode) { DP_INFO(edev, "flow_spec would require filtering mode %08x, but %08x is configured\n", t->mode, edev->arfs->filter_count); return -EINVAL; } if (qede_parse_actions(edev, flow_action, NULL)) return -EINVAL; return 0; } static int qede_flow_spec_to_rule(struct qede_dev *edev, struct qede_arfs_tuple *t, struct ethtool_rx_flow_spec *fs) { struct ethtool_rx_flow_spec_input input = {}; struct ethtool_rx_flow_rule *flow; __be16 proto; int err = 0; if (qede_flow_spec_validate_unused(edev, fs)) return -EOPNOTSUPP; switch ((fs->flow_type & ~FLOW_EXT)) { case TCP_V4_FLOW: case UDP_V4_FLOW: proto = htons(ETH_P_IP); break; case TCP_V6_FLOW: case UDP_V6_FLOW: proto = htons(ETH_P_IPV6); break; default: DP_VERBOSE(edev, NETIF_MSG_IFUP, "Can't support flow of type %08x\n", fs->flow_type); return -EOPNOTSUPP; } input.fs = fs; flow = ethtool_rx_flow_rule_create(&input); if (IS_ERR(flow)) return PTR_ERR(flow); if (qede_parse_flow_attr(edev, proto, flow->rule, t)) { err = -EINVAL; goto err_out; } /* Make sure location is valid and filter isn't already set */ err = qede_flow_spec_validate(edev, &flow->rule->action, t, fs->location); 
err_out: ethtool_rx_flow_rule_destroy(flow); return err; } int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) { struct ethtool_rx_flow_spec *fsp = &info->fs; struct qede_arfs_fltr_node *n; struct qede_arfs_tuple t; int min_hlen, rc; __qede_lock(edev); if (!edev->arfs) { rc = -EPERM; goto unlock; } /* Translate the flow specification into something fitting our DB */ rc = qede_flow_spec_to_rule(edev, &t, fsp); if (rc) goto unlock; if (qede_flow_find_fltr(edev, &t)) { rc = -EINVAL; goto unlock; } n = kzalloc(sizeof(*n), GFP_KERNEL); if (!n) { rc = -ENOMEM; goto unlock; } min_hlen = qede_flow_get_min_header_size(&t); n->data = kzalloc(min_hlen, GFP_KERNEL); if (!n->data) { kfree(n); rc = -ENOMEM; goto unlock; } n->sw_id = fsp->location; set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); n->buf_len = min_hlen; memcpy(&n->tuple, &t, sizeof(n->tuple)); qede_flow_set_destination(edev, n, fsp); /* Build a minimal header according to the flow */ n->tuple.build_hdr(&n->tuple, n->data); rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); if (rc) goto unlock; qede_configure_arfs_fltr(edev, n, n->rxq_id, true); rc = qede_poll_arfs_filter_config(edev, n); unlock: __qede_unlock(edev); return rc; }
linux-master
drivers/net/ethernet/qlogic/qede/qede_filter.c
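Note on the aRFS code above: qede_rx_flow_steer() masks the skb flow hash with QEDE_RFS_FLW_MASK to select one of 16 hlist buckets and then walks that bucket looking for an exact tuple match (qede_arfs_htbl_key_search()). Below is a minimal, hedged userspace sketch of that bucketing and lookup; the types, the singly-linked list, and the IPv4-only compare are simplified stand-ins for illustration and omit the driver's locking, IPv6 handling, and firmware configuration.

/*
 * Illustrative userspace sketch (not kernel code). It mirrors only the
 * hash-to-bucket masking and the exact 5-tuple compare used by the qede
 * aRFS lookup; everything else is simplified.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLW_BITSHIFT 4
#define FLW_MASK     ((1u << FLW_BITSHIFT) - 1)	/* 16 buckets, as in the driver */

struct arfs_tuple {
	uint32_t src_ipv4, dst_ipv4;	/* network order in the driver; plain ints here */
	uint16_t src_port, dst_port;
	uint8_t  ip_proto;
};

struct arfs_node {
	struct arfs_tuple tuple;
	uint16_t rxq_id;
	struct arfs_node *next;
};

static struct arfs_node *buckets[1u << FLW_BITSHIFT];

static unsigned int bucket_idx(uint32_t flow_hash)
{
	return flow_hash & FLW_MASK;	/* same masking idea as QEDE_RFS_FLW_MASK */
}

/* analogous to qede_arfs_htbl_key_search(): exact match on the stored tuple */
static struct arfs_node *lookup(uint32_t flow_hash, const struct arfs_tuple *t)
{
	struct arfs_node *n;

	for (n = buckets[bucket_idx(flow_hash)]; n; n = n->next)
		if (n->tuple.ip_proto == t->ip_proto &&
		    n->tuple.src_ipv4 == t->src_ipv4 &&
		    n->tuple.dst_ipv4 == t->dst_ipv4 &&
		    n->tuple.src_port == t->src_port &&
		    n->tuple.dst_port == t->dst_port)
			return n;
	return NULL;
}

static void insert(uint32_t flow_hash, const struct arfs_tuple *t, uint16_t rxq)
{
	struct arfs_node *n = calloc(1, sizeof(*n));

	if (!n)
		return;
	n->tuple = *t;
	n->rxq_id = rxq;
	n->next = buckets[bucket_idx(flow_hash)];
	buckets[bucket_idx(flow_hash)] = n;
}

int main(void)
{
	struct arfs_tuple t = { .src_ipv4 = 0x0a000001, .dst_ipv4 = 0x0a000002,
				.src_port = 12345, .dst_port = 80, .ip_proto = 6 };
	uint32_t hash = 0x5a5a5a17;	/* pretend skb_get_hash_raw() result */
	struct arfs_node *hit;

	insert(hash, &t, 3);
	hit = lookup(hash, &t);
	printf("filter %s, rxq=%u\n", hit ? "found" : "missing",
	       hit ? (unsigned)hit->rxq_id : 0u);
	return 0;
}

Keeping the bucket count at 1 << QEDE_RFS_FLW_BITSHIFT means a flow's bucket can be derived from the hash alone, so the RX-steering fast path only ever walks one short list under arfs_list_lock.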
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2003 - 2009 NetXen, Inc. * Copyright (C) 2009 - QLogic Corporation. * All rights reserved. */ #include <linux/types.h> #include <linux/delay.h> #include <linux/pci.h> #include <asm/io.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include "netxen_nic.h" #include "netxen_nic_hw.h" struct netxen_nic_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; #define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \ offsetof(struct netxen_adapter, m) #define NETXEN_NIC_PORT_WINDOW 0x10000 #define NETXEN_NIC_INVALID_DATA 0xDEADBEEF static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = { {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)}, {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)}, {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)}, {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)}, {"csummed", NETXEN_NIC_STAT(stats.csummed)}, {"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)}, {"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)}, {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)}, {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)}, }; #define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats) static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = { "Register_Test_on_offline", "Link_Test_on_offline" }; #define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test) #define NETXEN_NIC_REGS_COUNT 30 #define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) #define NETXEN_MAX_EEPROM_LEN 1024 static int netxen_nic_get_eeprom_len(struct net_device *dev) { return NETXEN_FLASH_TOTAL_SIZE; } static void netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { struct netxen_adapter *adapter = netdev_priv(dev); u32 fw_major = 0; u32 fw_minor = 0; u32 fw_build = 0; strscpy(drvinfo->driver, netxen_nic_driver_name, sizeof(drvinfo->driver)); strscpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, sizeof(drvinfo->version)); fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", fw_major, fw_minor, fw_build); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); } static int netxen_nic_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct netxen_adapter *adapter = netdev_priv(dev); int check_sfp_module = 0; u32 supported, advertising; /* read which mode */ if (adapter->ahw.port_type == NETXEN_NIC_GBE) { supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); advertising = (ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); cmd->base.port = PORT_TP; cmd->base.speed = adapter->link_speed; cmd->base.duplex = adapter->link_duplex; cmd->base.autoneg = adapter->link_autoneg; } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { u32 val; val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); if (val == NETXEN_PORT_MODE_802_3_AP) { supported = SUPPORTED_1000baseT_Full; advertising = ADVERTISED_1000baseT_Full; } else { supported = SUPPORTED_10000baseT_Full; advertising = ADVERTISED_10000baseT_Full; } if (netif_running(dev) && adapter->has_link_events) { cmd->base.speed = adapter->link_speed; cmd->base.autoneg = adapter->link_autoneg; cmd->base.duplex = adapter->link_duplex; 
goto skip; } cmd->base.port = PORT_TP; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { u16 pcifn = adapter->ahw.pci_func; val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); cmd->base.speed = P3_LINK_SPEED_MHZ * P3_LINK_SPEED_VAL(pcifn, val); } else cmd->base.speed = SPEED_10000; cmd->base.duplex = DUPLEX_FULL; cmd->base.autoneg = AUTONEG_DISABLE; } else return -EIO; skip: cmd->base.phy_address = adapter->physical_port; switch (adapter->ahw.board_type) { case NETXEN_BRDTYPE_P2_SB35_4G: case NETXEN_BRDTYPE_P2_SB31_2G: case NETXEN_BRDTYPE_P3_REF_QG: case NETXEN_BRDTYPE_P3_4_GB: case NETXEN_BRDTYPE_P3_4_GB_MM: supported |= SUPPORTED_Autoneg; advertising |= ADVERTISED_Autoneg; fallthrough; case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4_LP: case NETXEN_BRDTYPE_P3_10000_BASE_T: supported |= SUPPORTED_TP; advertising |= ADVERTISED_TP; cmd->base.port = PORT_TP; cmd->base.autoneg = (adapter->ahw.board_type == NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? (AUTONEG_DISABLE) : (adapter->link_autoneg); break; case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: case NETXEN_BRDTYPE_P3_IMEZ: case NETXEN_BRDTYPE_P3_XG_LOM: case NETXEN_BRDTYPE_P3_HMEZ: supported |= SUPPORTED_MII; advertising |= ADVERTISED_MII; cmd->base.port = PORT_MII; cmd->base.autoneg = AUTONEG_DISABLE; break; case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: case NETXEN_BRDTYPE_P3_10G_SFP_CT: case NETXEN_BRDTYPE_P3_10G_SFP_QT: advertising |= ADVERTISED_TP; supported |= SUPPORTED_TP; check_sfp_module = netif_running(dev) && adapter->has_link_events; fallthrough; case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P3_10G_XFP: supported |= SUPPORTED_FIBRE; advertising |= ADVERTISED_FIBRE; cmd->base.port = PORT_FIBRE; cmd->base.autoneg = AUTONEG_DISABLE; break; case NETXEN_BRDTYPE_P3_10G_TP: if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { cmd->base.autoneg = AUTONEG_DISABLE; supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); cmd->base.port = PORT_FIBRE; check_sfp_module = netif_running(dev) && adapter->has_link_events; } else { supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); cmd->base.port = PORT_TP; } break; default: printk(KERN_ERR "netxen-nic: Unsupported board model %d\n", adapter->ahw.board_type); return -EIO; } if (check_sfp_module) { switch (adapter->module_type) { case LINKEVENT_MODULE_OPTICAL_UNKNOWN: case LINKEVENT_MODULE_OPTICAL_SRLR: case LINKEVENT_MODULE_OPTICAL_LRM: case LINKEVENT_MODULE_OPTICAL_SFP_1G: cmd->base.port = PORT_FIBRE; break; case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: case LINKEVENT_MODULE_TWINAX: cmd->base.port = PORT_TP; break; default: cmd->base.port = -1; } } if (!netif_running(dev) || !adapter->ahw.linkup) { cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.speed = SPEED_UNKNOWN; } ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int netxen_nic_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct netxen_adapter *adapter = netdev_priv(dev); u32 speed = cmd->base.speed; int ret; if (adapter->ahw.port_type != NETXEN_NIC_GBE) return -EOPNOTSUPP; if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG)) return -EOPNOTSUPP; ret = nx_fw_cmd_set_gbe_port(adapter, speed, cmd->base.duplex, cmd->base.autoneg); if (ret == NX_RCODE_NOT_SUPPORTED) 
return -EOPNOTSUPP; else if (ret) return -EIO; adapter->link_speed = speed; adapter->link_duplex = cmd->base.duplex; adapter->link_autoneg = cmd->base.autoneg; if (!netif_running(dev)) return 0; dev->netdev_ops->ndo_stop(dev); return dev->netdev_ops->ndo_open(dev); } static int netxen_nic_get_regs_len(struct net_device *dev) { return NETXEN_NIC_REGS_LEN; } static void netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct netxen_adapter *adapter = netdev_priv(dev); struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; struct nx_host_sds_ring *sds_ring; u32 *regs_buff = p; int ring, i = 0; int port = adapter->physical_port; memset(p, 0, NETXEN_NIC_REGS_LEN); regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | (adapter->pdev)->device; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return; regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE); regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE); regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1); regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg); regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE); regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2); regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c); regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c); regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c); regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c); i += 2; regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3); regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); } else { i++; regs_buff[i++] = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port)); regs_buff[i++] = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port)); regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE); regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_consumer); } regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer); regs_buff[i++] = NXRDIO(adapter, recv_ctx->rds_rings[0].crb_rcv_producer); regs_buff[i++] = NXRDIO(adapter, recv_ctx->rds_rings[1].crb_rcv_producer); regs_buff[i++] = adapter->max_sds_rings; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &(recv_ctx->sds_rings[ring]); regs_buff[i++] = NXRDIO(adapter, sds_ring->crb_sts_consumer); } } static u32 netxen_nic_test_link(struct net_device *dev) { struct netxen_adapter *adapter = netdev_priv(dev); u32 val, port; port = adapter->physical_port; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { val = NXRD32(adapter, CRB_XG_STATE_P3); val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); return (val == XG_LINK_UP_P3) ? 0 : 1; } else { val = NXRD32(adapter, CRB_XG_STATE); val = (val >> port*8) & 0xff; return (val == XG_LINK_UP) ? 
0 : 1; } } static int netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct netxen_adapter *adapter = netdev_priv(dev); int offset; int ret; if (eeprom->len == 0) return -EINVAL; eeprom->magic = (adapter->pdev)->vendor | ((adapter->pdev)->device << 16); offset = eeprom->offset; ret = netxen_rom_fast_read_words(adapter, offset, bytes, eeprom->len); if (ret < 0) return ret; return 0; } static void netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(dev); ring->rx_pending = adapter->num_rxd; ring->rx_jumbo_pending = adapter->num_jumbo_rxd; ring->rx_jumbo_pending += adapter->num_lro_rxd; ring->tx_pending = adapter->num_txd; if (adapter->ahw.port_type == NETXEN_NIC_GBE) { ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G; ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G; } else { ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G; ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G; } ring->tx_max_pending = MAX_CMD_DESCRIPTORS; } static u32 netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) { u32 num_desc; num_desc = max(val, min); num_desc = min(num_desc, max); num_desc = roundup_pow_of_two(num_desc); if (val != num_desc) { printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", netxen_nic_driver_name, r_name, num_desc, val); } return num_desc; } static int netxen_nic_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(dev); u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G; u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; u16 num_rxd, num_jumbo_rxd, num_txd; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return -EOPNOTSUPP; if (ring->rx_mini_pending) return -EOPNOTSUPP; if (adapter->ahw.port_type == NETXEN_NIC_GBE) { max_rcv_desc = MAX_RCV_DESCRIPTORS_1G; max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; } num_rxd = netxen_validate_ringparam(ring->rx_pending, MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx"); num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending, MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo"); num_txd = netxen_validate_ringparam(ring->tx_pending, MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && num_jumbo_rxd == adapter->num_jumbo_rxd) return 0; adapter->num_rxd = num_rxd; adapter->num_jumbo_rxd = num_jumbo_rxd; adapter->num_txd = num_txd; return netxen_nic_reset_context(adapter); } static void netxen_nic_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) { struct netxen_adapter *adapter = netdev_priv(dev); __u32 val; int port = adapter->physical_port; pause->autoneg = 0; if (adapter->ahw.port_type == NETXEN_NIC_GBE) { if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS)) return; /* get flow control settings */ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); pause->rx_pause = netxen_gb_get_rx_flowctl(val); val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); switch (port) { case 0: pause->tx_pause = !(netxen_gb_get_gb0_mask(val)); break; case 1: pause->tx_pause = !(netxen_gb_get_gb1_mask(val)); break; case 2: pause->tx_pause = !(netxen_gb_get_gb2_mask(val)); break; case 3: default: pause->tx_pause = !(netxen_gb_get_gb3_mask(val)); break; } } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { if ((port < 0) || (port 
>= NETXEN_NIU_MAX_XG_PORTS)) return; pause->rx_pause = 1; val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); if (port == 0) pause->tx_pause = !(netxen_xg_get_xg0_mask(val)); else pause->tx_pause = !(netxen_xg_get_xg1_mask(val)); } else { printk(KERN_ERR"%s: Unknown board type: %x\n", netxen_nic_driver_name, adapter->ahw.port_type); } } static int netxen_nic_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) { struct netxen_adapter *adapter = netdev_priv(dev); __u32 val; int port = adapter->physical_port; /* not supported */ if (pause->autoneg) return -EINVAL; /* read mode */ if (adapter->ahw.port_type == NETXEN_NIC_GBE) { if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS)) return -EIO; /* set flow control */ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); if (pause->rx_pause) netxen_gb_rx_flowctl(val); else netxen_gb_unset_rx_flowctl(val); NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), val); /* set autoneg */ val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); switch (port) { case 0: if (pause->tx_pause) netxen_gb_unset_gb0_mask(val); else netxen_gb_set_gb0_mask(val); break; case 1: if (pause->tx_pause) netxen_gb_unset_gb1_mask(val); else netxen_gb_set_gb1_mask(val); break; case 2: if (pause->tx_pause) netxen_gb_unset_gb2_mask(val); else netxen_gb_set_gb2_mask(val); break; case 3: default: if (pause->tx_pause) netxen_gb_unset_gb3_mask(val); else netxen_gb_set_gb3_mask(val); break; } NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS)) return -EIO; val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); if (port == 0) { if (pause->tx_pause) netxen_xg_unset_xg0_mask(val); else netxen_xg_set_xg0_mask(val); } else { if (pause->tx_pause) netxen_xg_unset_xg1_mask(val); else netxen_xg_set_xg1_mask(val); } NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val); } else { printk(KERN_ERR "%s: Unknown board type: %x\n", netxen_nic_driver_name, adapter->ahw.port_type); } return 0; } static int netxen_nic_reg_test(struct net_device *dev) { struct netxen_adapter *adapter = netdev_priv(dev); u32 data_read, data_written; data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0)); if ((data_read & 0xffff) != adapter->pdev->vendor) return 1; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) return 0; data_written = (u32)0xa5a5a5a5; NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written); data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST); if (data_written != data_read) return 1; return 0; } static int netxen_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_TEST: return NETXEN_NIC_TEST_LEN; case ETH_SS_STATS: return NETXEN_NIC_STATS_LEN; default: return -EOPNOTSUPP; } } static void netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, u64 *data) { memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN); if ((data[0] = netxen_nic_reg_test(dev))) eth_test->flags |= ETH_TEST_FL_FAILED; /* link test */ if ((data[1] = (u64) netxen_nic_test_link(dev))) eth_test->flags |= ETH_TEST_FL_FAILED; } static void netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data) { int index; switch (stringset) { case ETH_SS_TEST: memcpy(data, *netxen_nic_gstrings_test, NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { memcpy(data + index * ETH_GSTRING_LEN, netxen_nic_gstrings_stats[index].stat_string, ETH_GSTRING_LEN); } break; } } static void netxen_nic_get_ethtool_stats(struct net_device *dev, 
struct ethtool_stats *stats, u64 *data) { struct netxen_adapter *adapter = netdev_priv(dev); int index; for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { char *p = (char *)adapter + netxen_nic_gstrings_stats[index].stat_offset; data[index] = (netxen_nic_gstrings_stats[index].sizeof_stat == sizeof(u64)) ? *(u64 *) p : *(u32 *) p; } } static void netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct netxen_adapter *adapter = netdev_priv(dev); u32 wol_cfg = 0; wol->supported = 0; wol->wolopts = 0; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return; wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); if (wol_cfg & (1UL << adapter->portnum)) wol->supported |= WAKE_MAGIC; wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); if (wol_cfg & (1UL << adapter->portnum)) wol->wolopts |= WAKE_MAGIC; } static int netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct netxen_adapter *adapter = netdev_priv(dev); u32 wol_cfg = 0; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return -EOPNOTSUPP; if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); if (!(wol_cfg & (1 << adapter->portnum))) return -EOPNOTSUPP; wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); if (wol->wolopts & WAKE_MAGIC) wol_cfg |= 1UL << adapter->portnum; else wol_cfg &= ~(1UL << adapter->portnum); NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg); return 0; } /* * Set the coalescing parameters. Currently only normal is supported. * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the * firmware coalescing to default. */ static int netxen_set_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *ethcoal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(netdev); if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) return -EINVAL; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return -EINVAL; /* * Return Error if unsupported values or * unsupported parameters are set. 
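 * (all four coalescing fields are rejected above 0xffff below, which appears
 * to be a 16-bit limit in the firmware interface)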
*/ if (ethcoal->rx_coalesce_usecs > 0xffff || ethcoal->rx_max_coalesced_frames > 0xffff || ethcoal->tx_coalesce_usecs > 0xffff || ethcoal->tx_max_coalesced_frames > 0xffff) return -EINVAL; if (!ethcoal->rx_coalesce_usecs || !ethcoal->rx_max_coalesced_frames) { adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; adapter->coal.normal.data.rx_time_us = NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; adapter->coal.normal.data.rx_packets = NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; } else { adapter->coal.flags = 0; adapter->coal.normal.data.rx_time_us = ethcoal->rx_coalesce_usecs; adapter->coal.normal.data.rx_packets = ethcoal->rx_max_coalesced_frames; } adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs; adapter->coal.normal.data.tx_packets = ethcoal->tx_max_coalesced_frames; netxen_config_intr_coalesce(adapter); return 0; } static int netxen_get_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *ethcoal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(netdev); if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) return -EINVAL; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return -EINVAL; ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us; ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us; ethcoal->rx_max_coalesced_frames = adapter->coal.normal.data.rx_packets; ethcoal->tx_max_coalesced_frames = adapter->coal.normal.data.tx_packets; return 0; } static int netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) { struct netxen_adapter *adapter = netdev_priv(netdev); struct netxen_minidump *mdump = &adapter->mdump; if (adapter->fw_mdump_rdy) dump->len = mdump->md_dump_size; else dump->len = 0; if (!mdump->md_enabled) dump->flag = ETH_FW_DUMP_DISABLE; else dump->flag = mdump->md_capture_mask; dump->version = adapter->fw_version; return 0; } /* Fw dump levels */ static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff }; static int netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val) { int i; struct netxen_adapter *adapter = netdev_priv(netdev); struct netxen_minidump *mdump = &adapter->mdump; switch (val->flag) { case NX_FORCE_FW_DUMP_KEY: if (!mdump->md_enabled) { netdev_info(netdev, "FW dump not enabled\n"); return 0; } if (adapter->fw_mdump_rdy) { netdev_info(netdev, "Previous dump not cleared, not forcing dump\n"); return 0; } netdev_info(netdev, "Forcing a fw dump\n"); nx_dev_request_reset(adapter); break; case NX_DISABLE_FW_DUMP: if (mdump->md_enabled) { netdev_info(netdev, "Disabling FW Dump\n"); mdump->md_enabled = 0; } break; case NX_ENABLE_FW_DUMP: if (!mdump->md_enabled) { netdev_info(netdev, "Enabling FW dump\n"); mdump->md_enabled = 1; } break; case NX_FORCE_FW_RESET: netdev_info(netdev, "Forcing FW reset\n"); nx_dev_request_reset(adapter); adapter->flags &= ~NETXEN_FW_RESET_OWNER; break; default: for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) { if (val->flag == FW_DUMP_LEVELS[i]) { mdump->md_capture_mask = val->flag; netdev_info(netdev, "Driver mask changed to: 0x%x\n", mdump->md_capture_mask); return 0; } } netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag); return -EINVAL; } return 0; } static int netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, void *buffer) { int i, copy_sz; u32 *hdr_ptr, *data; struct netxen_adapter *adapter = netdev_priv(netdev); struct netxen_minidump *mdump = &adapter->mdump; if (!adapter->fw_mdump_rdy) { netdev_info(netdev, "Dump not 
available\n"); return -EINVAL; } /* Copy template header first */ copy_sz = mdump->md_template_size; hdr_ptr = (u32 *) mdump->md_template; data = buffer; for (i = 0; i < copy_sz/sizeof(u32); i++) *data++ = cpu_to_le32(*hdr_ptr++); /* Copy captured dump data */ memcpy(buffer + copy_sz, mdump->md_capture_buff + mdump->md_template_size, mdump->md_capture_size); dump->len = copy_sz + mdump->md_capture_size; dump->flag = mdump->md_capture_mask; /* Free dump area once data has been captured */ vfree(mdump->md_capture_buff); mdump->md_capture_buff = NULL; adapter->fw_mdump_rdy = 0; netdev_info(netdev, "extracted the fw dump Successfully\n"); return 0; } const struct ethtool_ops netxen_nic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES, .get_drvinfo = netxen_nic_get_drvinfo, .get_regs_len = netxen_nic_get_regs_len, .get_regs = netxen_nic_get_regs, .get_link = ethtool_op_get_link, .get_eeprom_len = netxen_nic_get_eeprom_len, .get_eeprom = netxen_nic_get_eeprom, .get_ringparam = netxen_nic_get_ringparam, .set_ringparam = netxen_nic_set_ringparam, .get_pauseparam = netxen_nic_get_pauseparam, .set_pauseparam = netxen_nic_set_pauseparam, .get_wol = netxen_nic_get_wol, .set_wol = netxen_nic_set_wol, .self_test = netxen_nic_diag_test, .get_strings = netxen_nic_get_strings, .get_ethtool_stats = netxen_nic_get_ethtool_stats, .get_sset_count = netxen_get_sset_count, .get_coalesce = netxen_get_intr_coalesce, .set_coalesce = netxen_set_intr_coalesce, .get_dump_flag = netxen_get_dump_flag, .get_dump_data = netxen_get_dump_data, .set_dump = netxen_set_dump, .get_link_ksettings = netxen_nic_get_link_ksettings, .set_link_ksettings = netxen_nic_set_link_ksettings, };
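/*
 * This table is registered in netxen_setup_netdev() (netxen_nic_main.c, below)
 * via "netdev->ethtool_ops = &netxen_nic_ethtool_ops;", after which the
 * ethtool core dispatches the corresponding ethtool requests to the handlers
 * above.
 */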
linux-master
drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2003 - 2009 NetXen, Inc. * Copyright (C) 2009 - QLogic Corporation. * All rights reserved. */ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include "netxen_nic_hw.h" #include "netxen_nic.h" #include <linux/dma-mapping.h> #include <linux/if_vlan.h> #include <net/ip.h> #include <linux/ipv6.h> #include <linux/inetdevice.h> #include <linux/sysfs.h> MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME); char netxen_nic_driver_name[] = "netxen_nic"; static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v" NETXEN_NIC_LINUX_VERSIONID; static int port_mode = NETXEN_PORT_MODE_AUTO_NEG; /* Default to restricted 1G auto-neg mode */ static int wol_port_mode = 5; static int use_msi = 1; static int use_msi_x = 1; static int auto_fw_reset = AUTO_FW_RESET_ENABLED; module_param(auto_fw_reset, int, 0644); MODULE_PARM_DESC(auto_fw_reset,"Auto firmware reset (0=disabled, 1=enabled"); static int netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void netxen_nic_remove(struct pci_dev *pdev); static int netxen_nic_open(struct net_device *netdev); static int netxen_nic_close(struct net_device *netdev); static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *, struct net_device *); static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue); static void netxen_tx_timeout_task(struct work_struct *work); static void netxen_fw_poll_work(struct work_struct *work); static void netxen_schedule_work(struct netxen_adapter *adapter, work_func_t func, int delay); static void netxen_cancel_fw_work(struct netxen_adapter *adapter); static int netxen_nic_poll(struct napi_struct *napi, int budget); static void netxen_create_sysfs_entries(struct netxen_adapter *adapter); static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter); static void netxen_create_diag_entries(struct netxen_adapter *adapter); static void netxen_remove_diag_entries(struct netxen_adapter *adapter); static int nx_dev_request_aer(struct netxen_adapter *adapter); static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter); static int netxen_can_start_firmware(struct netxen_adapter *adapter); static irqreturn_t netxen_intr(int irq, void *data); static irqreturn_t netxen_msi_intr(int irq, void *data); static irqreturn_t netxen_msix_intr(int irq, void *data); static void netxen_free_ip_list(struct netxen_adapter *, bool); static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); static void netxen_nic_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); static int netxen_nic_set_mac(struct net_device *netdev, void *p); /* PCI Device ID Table */ #define ENTRY(device) \ {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} static const struct pci_device_id netxen_pci_tbl[] = { ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), ENTRY(PCI_DEVICE_ID_NX2031_4GCU), ENTRY(PCI_DEVICE_ID_NX2031_IMEZ), ENTRY(PCI_DEVICE_ID_NX2031_HMEZ), ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), ENTRY(PCI_DEVICE_ID_NX3031), {0,} }; MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); static uint32_t crb_cmd_producer[4] = { CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1, CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3 }; void 
netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, struct nx_host_tx_ring *tx_ring) { NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); } static uint32_t crb_cmd_consumer[4] = { CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1, CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3 }; static inline void netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, struct nx_host_tx_ring *tx_ring) { NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer); } static uint32_t msi_tgt_status[8] = { ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 }; static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring) { struct netxen_adapter *adapter = sds_ring->adapter; NXWRIO(adapter, sds_ring->crb_intr_mask, 0); } static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring) { struct netxen_adapter *adapter = sds_ring->adapter; NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1); if (!NETXEN_IS_MSI_FAMILY(adapter)) NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff); } static int netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count) { int size = sizeof(struct nx_host_sds_ring) * count; recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); return recv_ctx->sds_rings == NULL; } static void netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) { kfree(recv_ctx->sds_rings); recv_ctx->sds_rings = NULL; } static int netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) return -ENOMEM; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netif_napi_add(netdev, &sds_ring->napi, netxen_nic_poll); } return 0; } static void netxen_napi_del(struct netxen_adapter *adapter) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netif_napi_del(&sds_ring->napi); } netxen_free_sds_rings(&adapter->recv_ctx); } static void netxen_napi_enable(struct netxen_adapter *adapter) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; napi_enable(&sds_ring->napi); netxen_nic_enable_int(sds_ring); } } static void netxen_napi_disable(struct netxen_adapter *adapter) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netxen_nic_disable_int(sds_ring); napi_synchronize(&sds_ring->napi); napi_disable(&sds_ring->napi); } } static int nx_set_dma_mask(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; uint64_t mask, cmask; adapter->pci_using_dac = 0; mask = DMA_BIT_MASK(32); cmask = DMA_BIT_MASK(32); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { #ifndef CONFIG_IA64 mask = DMA_BIT_MASK(35); #endif } else { mask = DMA_BIT_MASK(39); cmask = mask; } if (dma_set_mask(&pdev->dev, mask) == 0 && dma_set_coherent_mask(&pdev->dev, cmask) == 0) { 
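		/* both the streaming and coherent DMA mask requests were accepted */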
adapter->pci_using_dac = 1; return 0; } return -EIO; } /* Update addressable range if firmware supports it */ static int nx_update_dma_mask(struct netxen_adapter *adapter) { int change, shift, err; uint64_t mask, old_mask, old_cmask; struct pci_dev *pdev = adapter->pdev; change = 0; shift = NXRD32(adapter, CRB_DMA_SHIFT); if (shift > 32) return 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) change = 1; else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4)) change = 1; if (change) { old_mask = pdev->dma_mask; old_cmask = pdev->dev.coherent_dma_mask; mask = DMA_BIT_MASK(32+shift); err = dma_set_mask(&pdev->dev, mask); if (err) goto err_out; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { err = dma_set_coherent_mask(&pdev->dev, mask); if (err) goto err_out; } dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); } return 0; err_out: dma_set_mask(&pdev->dev, old_mask); dma_set_coherent_mask(&pdev->dev, old_cmask); return err; } static int netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) { u32 val, timeout; if (first_boot == 0x55555555) { /* This is the first boot after power up */ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; /* PCI bus master workaround */ first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); if (!(first_boot & 0x4)) { first_boot |= 0x4; NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); NXRD32(adapter, NETXEN_PCIE_REG(0x4)); } /* This is the first boot after power up */ first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); if (first_boot != 0x80000f) { /* clear the register for future unloads/loads */ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0); return -EIO; } /* Start P2 boot loader */ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE); NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1); timeout = 0; do { msleep(1); val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); if (++timeout > 5000) return -EIO; } while (val == NETXEN_BDINFO_MAGIC); } return 0; } static void netxen_set_port_mode(struct netxen_adapter *adapter) { u32 val, data; val = adapter->ahw.board_type; if ((val == NETXEN_BRDTYPE_P3_HMEZ) || (val == NETXEN_BRDTYPE_P3_XG_LOM)) { if (port_mode == NETXEN_PORT_MODE_802_3_AP) { data = NETXEN_PORT_MODE_802_3_AP; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } else if (port_mode == NETXEN_PORT_MODE_XG) { data = NETXEN_PORT_MODE_XG; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { data = NETXEN_PORT_MODE_AUTO_NEG_1G; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { data = NETXEN_PORT_MODE_AUTO_NEG_XG; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } else { data = NETXEN_PORT_MODE_AUTO_NEG; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && (wol_port_mode != NETXEN_PORT_MODE_XG) && (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) && (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; } NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode); } } #define PCI_CAP_ID_GEN 0x10 static void netxen_pcie_strap_init(struct netxen_adapter *adapter) { u32 pdevfuncsave; u32 c8c9value = 0; u32 chicken = 0; u32 control = 0; int i, pos; struct pci_dev *pdev; pdev = adapter->pdev; chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3)); /* clear chicken3.25:24 */ chicken &= 0xFCFFFFFF; /* * if gen1 and B0, set F1020 - if gen 2, do nothing * if gen2 set to F1000 
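 * (the selected c8c9value is later written to config offset pos + 8 of all
 * eight PCI functions; a value of 0 skips the write entirely)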
*/ pos = pci_find_capability(pdev, PCI_CAP_ID_GEN); if (pos == 0xC0) { pci_read_config_dword(pdev, pos + 0x10, &control); if ((control & 0x000F0000) != 0x00020000) { /* set chicken3.24 if gen1 */ chicken |= 0x01000000; } dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n"); c8c9value = 0xF1000; } else { /* set chicken3.24 if gen1 */ chicken |= 0x01000000; dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n"); if (adapter->ahw.revision_id == NX_P3_B0) c8c9value = 0xF1020; else c8c9value = 0; } NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken); if (!c8c9value) return; pdevfuncsave = pdev->devfn; if (pdevfuncsave & 0x07) return; for (i = 0; i < 8; i++) { pci_read_config_dword(pdev, pos + 8, &control); pci_read_config_dword(pdev, pos + 8, &control); pci_write_config_dword(pdev, pos + 8, c8c9value); pdev->devfn++; } pdev->devfn = pdevfuncsave; } static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) { u32 control; if (pdev->msix_cap) { pci_read_config_dword(pdev, pdev->msix_cap, &control); if (enable) control |= PCI_MSIX_FLAGS_ENABLE; else control = 0; pci_write_config_dword(pdev, pdev->msix_cap, control); } } static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count) { int i; for (i = 0; i < count; i++) adapter->msix_entries[i].entry = i; } static int netxen_read_mac_addr(struct netxen_adapter *adapter) { int i; unsigned char *p; u64 mac_addr; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; u8 addr[ETH_ALEN]; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) return -EIO; } else { if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) return -EIO; } p = (unsigned char *)&mac_addr; for (i = 0; i < 6; i++) addr[i] = *(p + 5 - i); eth_hw_addr_set(netdev, addr); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); /* set station address */ if (!is_valid_ether_addr(netdev->dev_addr)) dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); return 0; } static int netxen_nic_set_mac(struct net_device *netdev, void *p) { struct netxen_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (netif_running(netdev)) { netif_device_detach(netdev); netxen_napi_disable(adapter); } memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); eth_hw_addr_set(netdev, addr->sa_data); adapter->macaddr_set(adapter, addr->sa_data); if (netif_running(netdev)) { netif_device_attach(netdev); netxen_napi_enable(adapter); } return 0; } static void netxen_set_multicast_list(struct net_device *dev) { struct netxen_adapter *adapter = netdev_priv(dev); adapter->set_multi(dev); } static netdev_features_t netxen_fix_features(struct net_device *dev, netdev_features_t features) { if (!(features & NETIF_F_RXCSUM)) { netdev_info(dev, "disabling LRO as RXCSUM is off\n"); features &= ~NETIF_F_LRO; } return features; } static int netxen_set_features(struct net_device *dev, netdev_features_t features) { struct netxen_adapter *adapter = netdev_priv(dev); int hw_lro; if (!((dev->features ^ features) & NETIF_F_LRO)) return 0; hw_lro = (features & NETIF_F_LRO) ? 
NETXEN_NIC_LRO_ENABLED : NETXEN_NIC_LRO_DISABLED; if (netxen_config_hw_lro(adapter, hw_lro)) return -EIO; if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter)) return -EIO; return 0; } static const struct net_device_ops netxen_netdev_ops = { .ndo_open = netxen_nic_open, .ndo_stop = netxen_nic_close, .ndo_start_xmit = netxen_nic_xmit_frame, .ndo_get_stats64 = netxen_nic_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = netxen_set_multicast_list, .ndo_set_mac_address = netxen_nic_set_mac, .ndo_change_mtu = netxen_nic_change_mtu, .ndo_tx_timeout = netxen_tx_timeout, .ndo_fix_features = netxen_fix_features, .ndo_set_features = netxen_set_features, }; static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, u32 mode) { NXWR32(adapter, NETXEN_INTR_MODE_REG, mode); } static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter) { return NXRD32(adapter, NETXEN_INTR_MODE_REG); } static void netxen_initialize_interrupt_registers(struct netxen_adapter *adapter) { struct netxen_legacy_intr_set *legacy_intrp; u32 tgt_status_reg, int_state_reg; if (adapter->ahw.revision_id >= NX_P3_B0) legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; else legacy_intrp = &legacy_intr[0]; tgt_status_reg = legacy_intrp->tgt_status_reg; int_state_reg = ISR_INT_STATE_REG; adapter->int_vec_bit = legacy_intrp->int_vec_bit; adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg); adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, legacy_intrp->tgt_mask_reg); adapter->pci_int_reg = netxen_get_ioaddr(adapter, legacy_intrp->pci_int_reg); adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); if (adapter->ahw.revision_id >= NX_P3_B1) adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, int_state_reg); else adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, CRB_INT_VECTOR); } static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter, int num_msix) { struct pci_dev *pdev = adapter->pdev; u32 value; int err; if (adapter->msix_supported) { netxen_init_msix_entries(adapter, num_msix); err = pci_enable_msix_range(pdev, adapter->msix_entries, num_msix, num_msix); if (err > 0) { adapter->flags |= NETXEN_NIC_MSIX_ENABLED; netxen_set_msix_bit(pdev, 1); if (adapter->rss_supported) adapter->max_sds_rings = num_msix; dev_info(&pdev->dev, "using msi-x interrupts\n"); return 0; } /* fall through for msi */ } if (use_msi && !pci_enable_msi(pdev)) { value = msi_tgt_status[adapter->ahw.pci_func]; adapter->flags |= NETXEN_NIC_MSI_ENABLED; adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value); adapter->msix_entries[0].vector = pdev->irq; dev_info(&pdev->dev, "using msi interrupts\n"); return 0; } dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n"); return -EIO; } static int netxen_setup_intr(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int num_msix; if (adapter->rss_supported) num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? 
MSIX_ENTRIES_PER_ADAPTER : 2; else num_msix = 1; adapter->max_sds_rings = 1; adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); netxen_initialize_interrupt_registers(adapter); netxen_set_msix_bit(pdev, 0); if (adapter->portnum == 0) { if (!netxen_setup_msi_interrupts(adapter, num_msix)) netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); else netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE); } else { if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE && netxen_setup_msi_interrupts(adapter, num_msix)) { dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n"); return -EIO; } } if (!NETXEN_IS_MSI_FAMILY(adapter)) { adapter->msix_entries[0].vector = pdev->irq; dev_info(&pdev->dev, "using legacy interrupts\n"); } return 0; } static void netxen_teardown_intr(struct netxen_adapter *adapter) { if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) pci_disable_msix(adapter->pdev); if (adapter->flags & NETXEN_NIC_MSI_ENABLED) pci_disable_msi(adapter->pdev); } static void netxen_cleanup_pci_map(struct netxen_adapter *adapter) { if (adapter->ahw.db_base != NULL) iounmap(adapter->ahw.db_base); if (adapter->ahw.pci_base0 != NULL) iounmap(adapter->ahw.pci_base0); if (adapter->ahw.pci_base1 != NULL) iounmap(adapter->ahw.pci_base1); if (adapter->ahw.pci_base2 != NULL) iounmap(adapter->ahw.pci_base2); } static int netxen_setup_pci_map(struct netxen_adapter *adapter) { void __iomem *db_ptr = NULL; resource_size_t mem_base, db_base; unsigned long mem_len, db_len = 0; struct pci_dev *pdev = adapter->pdev; int pci_func = adapter->ahw.pci_func; struct netxen_hardware_context *ahw = &adapter->ahw; int err = 0; /* * Set the CRB window to invalid. If any register in window 0 is * accessed it should set the window to 0 and then reset it to 1. 
*/ adapter->ahw.crb_win = -1; adapter->ahw.ocm_win = -1; /* remap phys address */ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ mem_len = pci_resource_len(pdev, 0); /* 128 Meg of memory */ if (mem_len == NETXEN_PCI_128MB_SIZE) { ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_SIZE); ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { dev_err(&pdev->dev, "failed to map PCI bar 0\n"); err = -EIO; goto err_out; } ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; } else if (mem_len == NETXEN_PCI_32MB_SIZE) { ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { dev_err(&pdev->dev, "failed to map PCI bar 0\n"); err = -EIO; goto err_out; } } else if (mem_len == NETXEN_PCI_2MB_SIZE) { ahw->pci_base0 = pci_ioremap_bar(pdev, 0); if (ahw->pci_base0 == NULL) { dev_err(&pdev->dev, "failed to map PCI bar 0\n"); return -EIO; } ahw->pci_len0 = mem_len; } else { return -EIO; } netxen_setup_hwops(adapter); dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func))); } if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) goto skip_doorbell; db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ db_len = pci_resource_len(pdev, 4); if (db_len == 0) { printk(KERN_ERR "%s: doorbell is disabled\n", netxen_nic_driver_name); err = -EIO; goto err_out; } db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); if (!db_ptr) { printk(KERN_ERR "%s: Failed to allocate doorbell map.", netxen_nic_driver_name); err = -EIO; goto err_out; } skip_doorbell: adapter->ahw.db_base = db_ptr; adapter->ahw.db_len = db_len; return 0; err_out: netxen_cleanup_pci_map(adapter); return err; } static void netxen_check_options(struct netxen_adapter *adapter) { u32 fw_major, fw_minor, fw_build, prev_fw_version; char brd_name[NETXEN_MAX_SHORT_NAME]; char serial_num[32]; int i, offset, val, err; __le32 *ptr32; struct pci_dev *pdev = adapter->pdev; adapter->driver_mismatch = 0; ptr32 = (__le32 *)&serial_num; offset = NX_FW_SERIAL_NUM_OFFSET; for (i = 0; i < 8; i++) { err = netxen_rom_fast_read(adapter, offset, &val); if (err) { dev_err(&pdev->dev, "error reading board info\n"); adapter->driver_mismatch = 1; return; } ptr32[i] = cpu_to_le32(val); offset += sizeof(u32); } fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); prev_fw_version = adapter->fw_version; adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); /* Get FW Mini Coredump template and store it */ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (adapter->mdump.md_template == NULL || adapter->fw_version > prev_fw_version) { kfree(adapter->mdump.md_template); adapter->mdump.md_template = NULL; err = netxen_setup_minidump(adapter); if (err) dev_err(&adapter->pdev->dev, "Failed to setup minidump rcode = %d\n", err); } } if (adapter->portnum == 0) { if 
(netxen_nic_get_brd_name_by_type(adapter->ahw.board_type, brd_name)) strcpy(serial_num, "Unknown"); pr_info("%s: %s Board S/N %s Chip rev 0x%x\n", module_name(THIS_MODULE), brd_name, serial_num, adapter->ahw.revision_id); } if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) { adapter->driver_mismatch = 1; dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n", fw_major, fw_minor, fw_build); return; } if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { i = NXRD32(adapter, NETXEN_SRE_MISC); adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0; } dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n", NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build, adapter->ahw.cut_through ? "cut-through" : "legacy"); if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; } adapter->msix_supported = 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { adapter->msix_supported = !!use_msi_x; adapter->rss_supported = !!use_msi_x; } else { u32 flashed_ver = 0; netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int *)&flashed_ver); flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) { switch (adapter->ahw.board_type) { case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P2_SB31_10G_CX4: adapter->msix_supported = !!use_msi_x; adapter->rss_supported = !!use_msi_x; break; default: break; } } } adapter->num_txd = MAX_CMD_DESCRIPTORS; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS; adapter->max_rds_rings = 3; } else { adapter->num_lro_rxd = 0; adapter->max_rds_rings = 2; } } static int netxen_start_firmware(struct netxen_adapter *adapter) { int val, err, first_boot; struct pci_dev *pdev = adapter->pdev; /* required for NX2031 dummy dma */ err = nx_set_dma_mask(adapter); if (err) return err; err = netxen_can_start_firmware(adapter); if (err < 0) return err; if (!err) goto wait_init; first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); err = netxen_check_hw_init(adapter, first_boot); if (err) { dev_err(&pdev->dev, "error in init HW init sequence\n"); return err; } netxen_request_firmware(adapter); err = netxen_need_fw_reset(adapter); if (err < 0) goto err_out; if (err == 0) goto pcie_strap_init; if (first_boot != 0x55555555) { NXWR32(adapter, CRB_CMDPEG_STATE, 0); netxen_pinit_from_rom(adapter); msleep(1); } NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555); NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0); NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_set_port_mode(adapter); err = netxen_load_firmware(adapter); if (err) goto err_out; netxen_release_firmware(adapter); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { /* Initialize multicast addr pool owners */ val = 0x7654; if (adapter->ahw.port_type == NETXEN_NIC_XGBE) val |= 0x0f000000; NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); } err = netxen_init_dummy_dma(adapter); if (err) goto err_out; /* * Tell the hardware our version number. 
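 * (packed as major << 16 | minor << 8 | subversion and written to
 * CRB_DRIVER_VERSION)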
*/ val = (_NETXEN_NIC_LINUX_MAJOR << 16) | ((_NETXEN_NIC_LINUX_MINOR << 8)) | (_NETXEN_NIC_LINUX_SUBVERSION); NXWR32(adapter, CRB_DRIVER_VERSION, val); pcie_strap_init: if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_pcie_strap_init(adapter); wait_init: /* Handshake with the card before we register the devices. */ err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); if (err) { netxen_free_dummy_dma(adapter); goto err_out; } NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY); nx_update_dma_mask(adapter); netxen_check_options(adapter); adapter->need_fw_reset = 0; /* fall through and release firmware */ err_out: netxen_release_firmware(adapter); return err; } static int netxen_nic_request_irq(struct netxen_adapter *adapter) { irq_handler_t handler; struct nx_host_sds_ring *sds_ring; int err, ring; unsigned long flags = 0; struct net_device *netdev = adapter->netdev; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) handler = netxen_msix_intr; else if (adapter->flags & NETXEN_NIC_MSI_ENABLED) handler = netxen_msi_intr; else { flags |= IRQF_SHARED; handler = netxen_intr; } adapter->irq = netdev->irq; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); err = request_irq(sds_ring->irq, handler, flags, sds_ring->name, sds_ring); if (err) return err; } return 0; } static void netxen_nic_free_irq(struct netxen_adapter *adapter) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; free_irq(sds_ring->irq, sds_ring); } } static void netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter) { adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; adapter->coal.normal.data.rx_time_us = NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; adapter->coal.normal.data.rx_packets = NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; adapter->coal.normal.data.tx_time_us = NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US; adapter->coal.normal.data.tx_packets = NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS; } /* with rtnl_lock */ static int __netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) { int err; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return -EIO; err = adapter->init_port(adapter, adapter->physical_port); if (err) { printk(KERN_ERR "%s: Failed to initialize port %d\n", netxen_nic_driver_name, adapter->portnum); return err; } if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) adapter->macaddr_set(adapter, adapter->mac_addr); adapter->set_multi(netdev); adapter->set_mtu(adapter, netdev->mtu); adapter->ahw.linkup = 0; if (adapter->max_sds_rings > 1) netxen_config_rss(adapter, 1); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_config_intr_coalesce(adapter); if (netdev->features & NETIF_F_LRO) netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED); netxen_napi_enable(adapter); if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) netxen_linkevent_request(adapter, 1); else netxen_nic_set_link_parameters(adapter); set_bit(__NX_DEV_UP, &adapter->state); return 0; } /* Usage: During resume and firmware recovery module.*/ static inline int netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) { int err = 0; rtnl_lock(); if (netif_running(netdev)) err = __netxen_nic_up(adapter, netdev); rtnl_unlock(); return err; } /* with rtnl_lock */ static void __netxen_nic_down(struct netxen_adapter 
*adapter, struct net_device *netdev) { if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return; if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state)) return; smp_mb(); netif_carrier_off(netdev); netif_tx_disable(netdev); if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) netxen_linkevent_request(adapter, 0); if (adapter->stop_port) adapter->stop_port(adapter); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_p3_free_mac_list(adapter); adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE); netxen_napi_disable(adapter); netxen_release_tx_buffers(adapter); } /* Usage: During suspend and firmware recovery module */ static inline void netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) { rtnl_lock(); if (netif_running(netdev)) __netxen_nic_down(adapter, netdev); rtnl_unlock(); } static int netxen_nic_attach(struct netxen_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; int err, ring; struct nx_host_rds_ring *rds_ring; struct nx_host_tx_ring *tx_ring; u32 capab2; if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) return 0; err = netxen_init_firmware(adapter); if (err) return err; adapter->flags &= ~NETXEN_FW_MSS_CAP; if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) { capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2); if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) adapter->flags |= NETXEN_FW_MSS_CAP; } err = netxen_napi_add(adapter, netdev); if (err) return err; err = netxen_alloc_sw_resources(adapter); if (err) { printk(KERN_ERR "%s: Error in setting sw resources\n", netdev->name); return err; } err = netxen_alloc_hw_resources(adapter); if (err) { printk(KERN_ERR "%s: Error in setting hw resources\n", netdev->name); goto err_out_free_sw; } if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { tx_ring = adapter->tx_ring; tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, crb_cmd_producer[adapter->portnum]); tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter, crb_cmd_consumer[adapter->portnum]); tx_ring->producer = 0; tx_ring->sw_consumer = 0; netxen_nic_update_cmd_producer(adapter, tx_ring); netxen_nic_update_cmd_consumer(adapter, tx_ring); } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx.rds_rings[ring]; netxen_post_rx_buffers(adapter, ring, rds_ring); } err = netxen_nic_request_irq(adapter); if (err) { dev_err(&pdev->dev, "%s: failed to setup interrupt\n", netdev->name); goto err_out_free_rxbuf; } if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_nic_init_coalesce_defaults(adapter); netxen_create_sysfs_entries(adapter); adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; return 0; err_out_free_rxbuf: netxen_release_rx_buffers(adapter); netxen_free_hw_resources(adapter); err_out_free_sw: netxen_free_sw_resources(adapter); return err; } static void netxen_nic_detach(struct netxen_adapter *adapter) { if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return; netxen_remove_sysfs_entries(adapter); netxen_free_hw_resources(adapter); netxen_release_rx_buffers(adapter); netxen_nic_free_irq(adapter); netxen_napi_del(adapter); netxen_free_sw_resources(adapter); adapter->is_up = 0; } int netxen_nic_reset_context(struct netxen_adapter *adapter) { int err = 0; struct net_device *netdev = adapter->netdev; if (test_and_set_bit(__NX_RESETTING, &adapter->state)) return -EBUSY; if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { netif_device_detach(netdev); if (netif_running(netdev)) __netxen_nic_down(adapter, netdev); netxen_nic_detach(adapter); if (netif_running(netdev)) { err 
= netxen_nic_attach(adapter); if (!err) err = __netxen_nic_up(adapter, netdev); if (err) goto done; } netif_device_attach(netdev); } done: clear_bit(__NX_RESETTING, &adapter->state); return err; } static int netxen_setup_netdev(struct netxen_adapter *adapter, struct net_device *netdev) { int err = 0; struct pci_dev *pdev = adapter->pdev; adapter->mc_enabled = 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) adapter->max_mc_count = 38; else adapter->max_mc_count = 16; netdev->netdev_ops = &netxen_netdev_ops; netdev->watchdog_timeo = 5*HZ; netxen_nic_change_mtu(netdev, netdev->mtu); netdev->ethtool_ops = &netxen_nic_ethtool_ops; netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_RXCSUM; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; netdev->vlan_features |= netdev->hw_features; if (adapter->pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA; } if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) netdev->hw_features |= NETIF_F_LRO; netdev->features |= netdev->hw_features; netdev->irq = adapter->msix_entries[0].vector; INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); if (netxen_read_mac_addr(adapter)) dev_warn(&pdev->dev, "failed to read mac addr\n"); netif_carrier_off(netdev); err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "failed to register net device\n"); return err; } return 0; } #define NETXEN_ULA_ADAPTER_KEY (0xdaddad01) #define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00) static void netxen_read_ula_info(struct netxen_adapter *adapter) { u32 temp; /* Print ULA info only once for an adapter */ if (adapter->portnum != 0) return; temp = NXRD32(adapter, NETXEN_ULA_KEY); switch (temp) { case NETXEN_ULA_ADAPTER_KEY: dev_info(&adapter->pdev->dev, "ULA adapter"); break; case NETXEN_NON_ULA_ADAPTER_KEY: dev_info(&adapter->pdev->dev, "non ULA adapter"); break; default: break; } return; } #ifdef CONFIG_PCIEAER static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct pci_dev *root = pdev->bus->self; u32 aer_pos; /* root bus? 
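 * (pdev->bus->self is NULL for a device sitting directly on the root bus)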
*/ if (!root) return; if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) return; if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT) return; aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); if (!aer_pos) return; pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff); } #endif static int netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev = NULL; struct netxen_adapter *adapter = NULL; int i = 0, err; int pci_func_id = PCI_FUNC(pdev->devfn); uint8_t revision_id; u32 val; if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n", module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); return -ENODEV; } if ((err = pci_enable_device(pdev))) return err; if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { err = -ENODEV; goto err_out_disable_pdev; } if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) goto err_out_disable_pdev; pci_set_master(pdev); netdev = alloc_etherdev(sizeof(struct netxen_adapter)); if(!netdev) { err = -ENOMEM; goto err_out_free_res; } SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; adapter->ahw.pci_func = pci_func_id; revision_id = pdev->revision; adapter->ahw.revision_id = revision_id; rwlock_init(&adapter->ahw.crb_lock); spin_lock_init(&adapter->ahw.mem_lock); spin_lock_init(&adapter->tx_clean_lock); INIT_LIST_HEAD(&adapter->mac_list); INIT_LIST_HEAD(&adapter->ip_list); err = netxen_setup_pci_map(adapter); if (err) goto err_out_free_netdev; /* This will be reset for mezz cards */ adapter->portnum = pci_func_id; err = netxen_nic_get_board_info(adapter); if (err) { dev_err(&pdev->dev, "Error getting board config info.\n"); goto err_out_iounmap; } #ifdef CONFIG_PCIEAER netxen_mask_aer_correctable(adapter); #endif /* Mezz cards have PCI function 0,2,3 enabled */ switch (adapter->ahw.board_type) { case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: if (pci_func_id >= 2) adapter->portnum = pci_func_id - 2; break; default: break; } err = netxen_check_flash_fw_compatibility(adapter); if (err) goto err_out_iounmap; if (adapter->portnum == 0) { val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); if (val != 0xffffffff && val != 0) { NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); adapter->need_fw_reset = 1; } } err = netxen_start_firmware(adapter); if (err) goto err_out_decr_ref; /* * See if the firmware gave us a virtual-physical port mapping. 
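 * (CRB_V2P(portnum) reads back as 0x55555555 when no mapping has been
 * programmed, in which case the default portnum is kept)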
*/ adapter->physical_port = adapter->portnum; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { i = NXRD32(adapter, CRB_V2P(adapter->portnum)); if (i != 0x55555555) adapter->physical_port = i; } /* MTU range: 0 - 8000 (P2) or 9600 (P3) */ netdev->min_mtu = 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netdev->max_mtu = P3_MAX_MTU; else netdev->max_mtu = P2_MAX_MTU; netxen_nic_clear_stats(adapter); err = netxen_setup_intr(adapter); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupts, error = %d\n", err); goto err_out_disable_msi; } netxen_read_ula_info(adapter); err = netxen_setup_netdev(adapter, netdev); if (err) goto err_out_disable_msi; pci_set_drvdata(pdev, adapter); netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); switch (adapter->ahw.port_type) { case NETXEN_NIC_GBE: dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", adapter->netdev->name); break; case NETXEN_NIC_XGBE: dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", adapter->netdev->name); break; } netxen_create_diag_entries(adapter); return 0; err_out_disable_msi: netxen_teardown_intr(adapter); netxen_free_dummy_dma(adapter); err_out_decr_ref: nx_decr_dev_ref_cnt(adapter); err_out_iounmap: netxen_cleanup_pci_map(adapter); err_out_free_netdev: free_netdev(netdev); err_out_free_res: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); return err; } static void netxen_cleanup_minidump(struct netxen_adapter *adapter) { kfree(adapter->mdump.md_template); adapter->mdump.md_template = NULL; if (adapter->mdump.md_capture_buff) { vfree(adapter->mdump.md_capture_buff); adapter->mdump.md_capture_buff = NULL; } } static void netxen_nic_remove(struct pci_dev *pdev) { struct netxen_adapter *adapter; struct net_device *netdev; adapter = pci_get_drvdata(pdev); if (adapter == NULL) return; netdev = adapter->netdev; netxen_cancel_fw_work(adapter); unregister_netdev(netdev); cancel_work_sync(&adapter->tx_timeout_task); netxen_free_ip_list(adapter, false); netxen_nic_detach(adapter); nx_decr_dev_ref_cnt(adapter); if (adapter->portnum == 0) netxen_free_dummy_dma(adapter); clear_bit(__NX_RESETTING, &adapter->state); netxen_teardown_intr(adapter); netxen_set_interrupt_mode(adapter, 0); netxen_remove_diag_entries(adapter); netxen_cleanup_pci_map(adapter); netxen_release_firmware(adapter); if (NX_IS_REVISION_P3(pdev->revision)) netxen_cleanup_minidump(adapter); pci_release_regions(pdev); pci_disable_device(pdev); free_netdev(netdev); } static void netxen_nic_detach_func(struct netxen_adapter *adapter) { struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); netxen_cancel_fw_work(adapter); if (netif_running(netdev)) netxen_nic_down(adapter, netdev); cancel_work_sync(&adapter->tx_timeout_task); netxen_nic_detach(adapter); if (adapter->portnum == 0) netxen_free_dummy_dma(adapter); nx_decr_dev_ref_cnt(adapter); clear_bit(__NX_RESETTING, &adapter->state); } static int netxen_nic_attach_late_func(struct pci_dev *pdev) { struct netxen_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; int err; pci_set_master(pdev); adapter->ahw.crb_win = -1; adapter->ahw.ocm_win = -1; err = netxen_start_firmware(adapter); if (err) { dev_err(&pdev->dev, "failed to start firmware\n"); return err; } if (netif_running(netdev)) { err = netxen_nic_attach(adapter); if (err) goto err_out; err = netxen_nic_up(adapter, netdev); if (err) goto err_out_detach; netxen_restore_indev_addr(netdev, NETDEV_UP); } netif_device_attach(netdev); 
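	/* re-arm the periodic firmware health poll now that the device is attached again */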
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); return 0; err_out_detach: netxen_nic_detach(adapter); err_out: nx_decr_dev_ref_cnt(adapter); return err; } static int netxen_nic_attach_func(struct pci_dev *pdev) { int err; err = pci_enable_device(pdev); if (err) return err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return netxen_nic_attach_late_func(pdev); } static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct netxen_adapter *adapter = pci_get_drvdata(pdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (nx_dev_request_aer(adapter)) return PCI_ERS_RESULT_RECOVERED; netxen_nic_detach_func(adapter); pci_disable_device(pdev); return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev) { int err = 0; err = netxen_nic_attach_func(pdev); return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } static void netxen_nic_shutdown(struct pci_dev *pdev) { struct netxen_adapter *adapter = pci_get_drvdata(pdev); netxen_nic_detach_func(adapter); if (pci_save_state(pdev)) return; if (netxen_nic_wol_supported(adapter)) { pci_enable_wake(pdev, PCI_D3cold, 1); pci_enable_wake(pdev, PCI_D3hot, 1); } pci_disable_device(pdev); } static int __maybe_unused netxen_nic_suspend(struct device *dev_d) { struct netxen_adapter *adapter = dev_get_drvdata(dev_d); netxen_nic_detach_func(adapter); if (netxen_nic_wol_supported(adapter)) device_wakeup_enable(dev_d); return 0; } static int __maybe_unused netxen_nic_resume(struct device *dev_d) { return netxen_nic_attach_late_func(to_pci_dev(dev_d)); } static int netxen_nic_open(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); int err = 0; if (adapter->driver_mismatch) return -EIO; err = netxen_nic_attach(adapter); if (err) return err; err = __netxen_nic_up(adapter, netdev); if (err) goto err_out; netif_start_queue(netdev); return 0; err_out: netxen_nic_detach(adapter); return err; } /* * netxen_nic_close - Disables a network interface entry point */ static int netxen_nic_close(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); __netxen_nic_down(adapter, netdev); return 0; } static void netxen_tso_check(struct net_device *netdev, struct nx_host_tx_ring *tx_ring, struct cmd_desc_type0 *first_desc, struct sk_buff *skb) { u8 opcode = TX_ETHER_PKT; __be16 protocol = skb->protocol; u16 flags = 0, vid = 0; u32 producer; int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; struct cmd_desc_type0 *hwdesc; struct vlan_ethhdr *vh; if (protocol == cpu_to_be16(ETH_P_8021Q)) { vh = skb_vlan_eth_hdr(skb); protocol = vh->h_vlan_encapsulated_proto; flags = FLAGS_VLAN_TAGGED; } else if (skb_vlan_tag_present(skb)) { flags = FLAGS_VLAN_OOB; vid = skb_vlan_tag_get(skb); netxen_set_tx_vlan_tci(first_desc, vid); vlan_oob = 1; } if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && skb_shinfo(skb)->gso_size > 0) { hdr_len = skb_tcp_all_headers(skb); first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); first_desc->total_hdr_length = hdr_len; if (vlan_oob) { first_desc->total_hdr_length += VLAN_HLEN; first_desc->tcp_hdr_offset = VLAN_HLEN; first_desc->ip_hdr_offset = VLAN_HLEN; /* Only in case of TSO on vlan device */ flags |= FLAGS_VLAN_TAGGED; } opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? 
TX_TCP_LSO6 : TX_TCP_LSO; tso = 1; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4proto; if (protocol == cpu_to_be16(ETH_P_IP)) { l4proto = ip_hdr(skb)->protocol; if (l4proto == IPPROTO_TCP) opcode = TX_TCP_PKT; else if(l4proto == IPPROTO_UDP) opcode = TX_UDP_PKT; } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { l4proto = ipv6_hdr(skb)->nexthdr; if (l4proto == IPPROTO_TCP) opcode = TX_TCPV6_PKT; else if(l4proto == IPPROTO_UDP) opcode = TX_UDPV6_PKT; } } first_desc->tcp_hdr_offset += skb_transport_offset(skb); first_desc->ip_hdr_offset += skb_network_offset(skb); netxen_set_tx_flags_opcode(first_desc, flags, opcode); if (!tso) return; /* For LSO, we need to copy the MAC/IP/TCP headers into * the descriptor ring */ producer = tx_ring->producer; copied = 0; offset = 2; if (vlan_oob) { /* Create a TSO vlan header template for firmware */ hwdesc = &tx_ring->desc_head[producer]; tx_ring->cmd_buf_arr[producer].skb = NULL; copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, hdr_len + VLAN_HLEN); vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); skb_copy_from_linear_data(skb, vh, 12); vh->h_vlan_proto = htons(ETH_P_8021Q); vh->h_vlan_TCI = htons(vid); skb_copy_from_linear_data_offset(skb, 12, (char *)vh + 16, copy_len - 16); copied = copy_len - VLAN_HLEN; offset = 0; producer = get_next_index(producer, tx_ring->num_desc); } while (copied < hdr_len) { copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, (hdr_len - copied)); hwdesc = &tx_ring->desc_head[producer]; tx_ring->cmd_buf_arr[producer].skb = NULL; skb_copy_from_linear_data_offset(skb, copied, (char *)hwdesc + offset, copy_len); copied += copy_len; offset = 0; producer = get_next_index(producer, tx_ring->num_desc); } tx_ring->producer = producer; barrier(); } static int netxen_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) { struct netxen_skb_frag *nf; skb_frag_t *frag; int i, nr_frags; dma_addr_t map; nr_frags = skb_shinfo(skb)->nr_frags; nf = &pbuf->frag_array[0]; map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, map)) goto out_err; nf->dma = map; nf->length = skb_headlen(skb); for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; nf = &pbuf->frag_array[i+1]; map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, map)) goto unwind; nf->dma = map; nf->length = skb_frag_size(frag); } return 0; unwind: while (--i >= 0) { nf = &pbuf->frag_array[i+1]; dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE); nf->dma = 0ULL; } nf = &pbuf->frag_array[0]; dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE); nf->dma = 0ULL; out_err: return -ENOMEM; } static inline void netxen_clear_cmddesc(u64 *desc) { desc[0] = 0ULL; desc[2] = 0ULL; } static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); struct nx_host_tx_ring *tx_ring = adapter->tx_ring; struct netxen_cmd_buffer *pbuf; struct netxen_skb_frag *buffrag; struct cmd_desc_type0 *hwdesc, *first_desc; struct pci_dev *pdev; int i, k; int delta = 0; skb_frag_t *frag; u32 producer; int frag_count; u32 num_txd = tx_ring->num_desc; frag_count = skb_shinfo(skb)->nr_frags + 1; /* 14 frags supported for normal packet and * 32 frags supported for TSO packet */ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { frag = 
&skb_shinfo(skb)->frags[i]; delta += skb_frag_size(frag); } if (!__pskb_pull_tail(skb, delta)) goto drop_packet; frag_count = 1 + skb_shinfo(skb)->nr_frags; } if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { netif_stop_queue(netdev); smp_mb(); if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) netif_start_queue(netdev); else return NETDEV_TX_BUSY; } producer = tx_ring->producer; pbuf = &tx_ring->cmd_buf_arr[producer]; pdev = adapter->pdev; if (netxen_map_tx_skb(pdev, skb, pbuf)) goto drop_packet; pbuf->skb = skb; pbuf->frag_count = frag_count; first_desc = hwdesc = &tx_ring->desc_head[producer]; netxen_clear_cmddesc((u64 *)hwdesc); netxen_set_tx_frags_len(first_desc, frag_count, skb->len); netxen_set_tx_port(first_desc, adapter->portnum); for (i = 0; i < frag_count; i++) { k = i % 4; if ((k == 0) && (i > 0)) { /* move to next desc.*/ producer = get_next_index(producer, num_txd); hwdesc = &tx_ring->desc_head[producer]; netxen_clear_cmddesc((u64 *)hwdesc); tx_ring->cmd_buf_arr[producer].skb = NULL; } buffrag = &pbuf->frag_array[i]; hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); switch (k) { case 0: hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); break; case 1: hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); break; case 2: hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); break; case 3: hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); break; } } tx_ring->producer = get_next_index(producer, num_txd); netxen_tso_check(netdev, tx_ring, first_desc, skb); adapter->stats.txbytes += skb->len; adapter->stats.xmitcalled++; netxen_nic_update_cmd_producer(adapter, tx_ring); return NETDEV_TX_OK; drop_packet: adapter->stats.txdropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static int netxen_nic_check_temp(struct netxen_adapter *adapter) { struct net_device *netdev = adapter->netdev; uint32_t temp, temp_state, temp_val; int rv = 0; temp = NXRD32(adapter, CRB_TEMP_STATE); temp_state = nx_get_temp_state(temp); temp_val = nx_get_temp_val(temp); if (temp_state == NX_TEMP_PANIC) { printk(KERN_ALERT "%s: Device temperature %d degrees C exceeds" " maximum allowed. Hardware has been shut down.\n", netdev->name, temp_val); rv = 1; } else if (temp_state == NX_TEMP_WARN) { if (adapter->temp == NX_TEMP_NORMAL) { printk(KERN_ALERT "%s: Device temperature %d degrees C " "exceeds operating range." 
" Immediate action needed.\n", netdev->name, temp_val); } } else { if (adapter->temp == NX_TEMP_WARN) { printk(KERN_INFO "%s: Device temperature is now %d degrees C" " in normal range.\n", netdev->name, temp_val); } } adapter->temp = temp_state; return rv; } void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) { struct net_device *netdev = adapter->netdev; if (adapter->ahw.linkup && !linkup) { printk(KERN_INFO "%s: %s NIC Link is down\n", netxen_nic_driver_name, netdev->name); adapter->ahw.linkup = 0; if (netif_running(netdev)) { netif_carrier_off(netdev); netif_stop_queue(netdev); } adapter->link_changed = !adapter->has_link_events; } else if (!adapter->ahw.linkup && linkup) { printk(KERN_INFO "%s: %s NIC Link is up\n", netxen_nic_driver_name, netdev->name); adapter->ahw.linkup = 1; if (netif_running(netdev)) { netif_carrier_on(netdev); netif_wake_queue(netdev); } adapter->link_changed = !adapter->has_link_events; } } static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) { u32 val, port, linkup; port = adapter->physical_port; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { val = NXRD32(adapter, CRB_XG_STATE_P3); val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); linkup = (val == XG_LINK_UP_P3); } else { val = NXRD32(adapter, CRB_XG_STATE); val = (val >> port*8) & 0xff; linkup = (val == XG_LINK_UP); } netxen_advert_link_change(adapter, linkup); } static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct netxen_adapter *adapter = netdev_priv(netdev); if (test_bit(__NX_RESETTING, &adapter->state)) return; dev_err(&netdev->dev, "transmit timeout, resetting.\n"); schedule_work(&adapter->tx_timeout_task); } static void netxen_tx_timeout_task(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, tx_timeout_task); if (!netif_running(adapter->netdev)) return; if (test_and_set_bit(__NX_RESETTING, &adapter->state)) return; if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) goto request_reset; rtnl_lock(); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { /* try to scrub interrupt */ netxen_napi_disable(adapter); netxen_napi_enable(adapter); netif_wake_queue(adapter->netdev); clear_bit(__NX_RESETTING, &adapter->state); } else { clear_bit(__NX_RESETTING, &adapter->state); if (netxen_nic_reset_context(adapter)) { rtnl_unlock(); goto request_reset; } } netif_trans_update(adapter->netdev); rtnl_unlock(); return; request_reset: adapter->need_fw_reset = 1; clear_bit(__NX_RESETTING, &adapter->state); } static void netxen_nic_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct netxen_adapter *adapter = netdev_priv(netdev); stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; stats->tx_packets = adapter->stats.xmitfinished; stats->rx_bytes = adapter->stats.rxbytes; stats->tx_bytes = adapter->stats.txbytes; stats->rx_dropped = adapter->stats.rxdropped; stats->tx_dropped = adapter->stats.txdropped; } static irqreturn_t netxen_intr(int irq, void *data) { struct nx_host_sds_ring *sds_ring = data; struct netxen_adapter *adapter = sds_ring->adapter; u32 status = 0; status = readl(adapter->isr_int_vec); if (!(status & adapter->int_vec_bit)) return IRQ_NONE; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { /* check interrupt state machine, to be sure */ status = readl(adapter->crb_int_state_reg); if (!ISR_LEGACY_INT_TRIGGERED(status)) return IRQ_NONE; } else { unsigned long our_int = 0; our_int = readl(adapter->crb_int_state_reg); /* not our interrupt 
*/ if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) return IRQ_NONE; /* claim interrupt */ writel((our_int & 0xffffffff), adapter->crb_int_state_reg); /* clear interrupt */ netxen_nic_disable_int(sds_ring); } writel(0xffffffff, adapter->tgt_status_reg); /* read twice to ensure write is flushed */ readl(adapter->isr_int_vec); readl(adapter->isr_int_vec); napi_schedule(&sds_ring->napi); return IRQ_HANDLED; } static irqreturn_t netxen_msi_intr(int irq, void *data) { struct nx_host_sds_ring *sds_ring = data; struct netxen_adapter *adapter = sds_ring->adapter; /* clear interrupt */ writel(0xffffffff, adapter->tgt_status_reg); napi_schedule(&sds_ring->napi); return IRQ_HANDLED; } static irqreturn_t netxen_msix_intr(int irq, void *data) { struct nx_host_sds_ring *sds_ring = data; napi_schedule(&sds_ring->napi); return IRQ_HANDLED; } static int netxen_nic_poll(struct napi_struct *napi, int budget) { struct nx_host_sds_ring *sds_ring = container_of(napi, struct nx_host_sds_ring, napi); struct netxen_adapter *adapter = sds_ring->adapter; int tx_complete; int work_done; tx_complete = netxen_process_cmd_ring(adapter); work_done = netxen_process_rcv_ring(sds_ring, budget); if (!tx_complete) work_done = budget; if (work_done < budget) { napi_complete_done(&sds_ring->napi, work_done); if (test_bit(__NX_DEV_UP, &adapter->state)) netxen_nic_enable_int(sds_ring); } return work_done; } static int nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) { int count; if (netxen_api_lock(adapter)) return -EIO; count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); netxen_api_unlock(adapter); return count; } static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter) { int count, state; if (netxen_api_lock(adapter)) return -EIO; count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); WARN_ON(count == 0); NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); state = NXRD32(adapter, NX_CRB_DEV_STATE); if (count == 0 && state != NX_DEV_FAILED) NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); netxen_api_unlock(adapter); return count; } static int nx_dev_request_aer(struct netxen_adapter *adapter) { u32 state; int ret = -EINVAL; if (netxen_api_lock(adapter)) return ret; state = NXRD32(adapter, NX_CRB_DEV_STATE); if (state == NX_DEV_NEED_AER) ret = 0; else if (state == NX_DEV_READY) { NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER); ret = 0; } netxen_api_unlock(adapter); return ret; } int nx_dev_request_reset(struct netxen_adapter *adapter) { u32 state; int ret = -EINVAL; if (netxen_api_lock(adapter)) return ret; state = NXRD32(adapter, NX_CRB_DEV_STATE); if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) ret = 0; else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) { NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); adapter->flags |= NETXEN_FW_RESET_OWNER; ret = 0; } netxen_api_unlock(adapter); return ret; } static int netxen_can_start_firmware(struct netxen_adapter *adapter) { int count; int can_start = 0; if (netxen_api_lock(adapter)) { nx_incr_dev_ref_cnt(adapter); return -1; } count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); if ((count < 0) || (count >= NX_MAX_PCI_FUNC)) count = 0; if (count == 0) { can_start = 1; NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING); } NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); netxen_api_unlock(adapter); return can_start; } static void netxen_schedule_work(struct netxen_adapter *adapter, work_func_t func, int delay) { INIT_DELAYED_WORK(&adapter->fw_work, func); schedule_delayed_work(&adapter->fw_work, delay); } 
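/*
 * Descriptive note (added): netxen_cancel_fw_work() below pairs with
 * netxen_schedule_work() above.  It first claims the __NX_RESETTING bit,
 * sleeping in 10 ms steps until any other owner releases it, and then
 * cancels the delayed fw_work synchronously, so no firmware poll or reset
 * work is pending or running once it returns.
 */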
static void netxen_cancel_fw_work(struct netxen_adapter *adapter) { while (test_and_set_bit(__NX_RESETTING, &adapter->state)) msleep(10); cancel_delayed_work_sync(&adapter->fw_work); } static void netxen_attach_work(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, fw_work.work); struct net_device *netdev = adapter->netdev; int err = 0; if (netif_running(netdev)) { err = netxen_nic_attach(adapter); if (err) goto done; err = netxen_nic_up(adapter, netdev); if (err) { netxen_nic_detach(adapter); goto done; } netxen_restore_indev_addr(netdev, NETDEV_UP); } netif_device_attach(netdev); done: adapter->fw_fail_cnt = 0; clear_bit(__NX_RESETTING, &adapter->state); netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); } static void netxen_fwinit_work(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, fw_work.work); int dev_state; int count; dev_state = NXRD32(adapter, NX_CRB_DEV_STATE); if (adapter->flags & NETXEN_FW_RESET_OWNER) { count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); WARN_ON(count == 0); if (count == 1) { if (adapter->mdump.md_enabled) { rtnl_lock(); netxen_dump_fw(adapter); rtnl_unlock(); } adapter->flags &= ~NETXEN_FW_RESET_OWNER; if (netxen_api_lock(adapter)) { clear_bit(__NX_RESETTING, &adapter->state); NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED); return; } count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); dev_state = NX_DEV_COLD; netxen_api_unlock(adapter); } } switch (dev_state) { case NX_DEV_COLD: case NX_DEV_READY: if (!netxen_start_firmware(adapter)) { netxen_schedule_work(adapter, netxen_attach_work, 0); return; } break; case NX_DEV_NEED_RESET: case NX_DEV_INITALIZING: netxen_schedule_work(adapter, netxen_fwinit_work, 2 * FW_POLL_DELAY); return; case NX_DEV_FAILED: default: nx_incr_dev_ref_cnt(adapter); break; } if (netxen_api_lock(adapter)) { clear_bit(__NX_RESETTING, &adapter->state); return; } NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED); netxen_api_unlock(adapter); dev_err(&adapter->pdev->dev, "%s: Device initialization Failed\n", adapter->netdev->name); clear_bit(__NX_RESETTING, &adapter->state); } static void netxen_detach_work(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, fw_work.work); struct net_device *netdev = adapter->netdev; int ref_cnt = 0, delay; u32 status; netif_device_detach(netdev); netxen_nic_down(adapter, netdev); rtnl_lock(); netxen_nic_detach(adapter); rtnl_unlock(); status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); if (status & NX_RCODE_FATAL_ERROR) goto err_ret; if (adapter->temp == NX_TEMP_PANIC) goto err_ret; if (!(adapter->flags & NETXEN_FW_RESET_OWNER)) ref_cnt = nx_decr_dev_ref_cnt(adapter); if (ref_cnt == -EIO) goto err_ret; delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY); adapter->fw_wait_cnt = 0; netxen_schedule_work(adapter, netxen_fwinit_work, delay); return; err_ret: clear_bit(__NX_RESETTING, &adapter->state); } static int netxen_check_health(struct netxen_adapter *adapter) { u32 state, heartbit; u32 peg_status; struct net_device *netdev = adapter->netdev; state = NXRD32(adapter, NX_CRB_DEV_STATE); if (state == NX_DEV_NEED_AER) return 0; if (netxen_nic_check_temp(adapter)) goto detach; if (adapter->need_fw_reset) { if (nx_dev_request_reset(adapter)) return 0; goto detach; } /* NX_DEV_NEED_RESET, this state can be marked in two cases * 1. Tx timeout 2. 
Fw hang * Send request to destroy context in case of tx timeout only * and doesn't required in case of Fw hang */ if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) { adapter->need_fw_reset = 1; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) goto detach; } if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); if (heartbit != adapter->heartbit) { adapter->heartbit = heartbit; adapter->fw_fail_cnt = 0; if (adapter->need_fw_reset) goto detach; return 0; } if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) return 0; if (nx_dev_request_reset(adapter)) return 0; clear_bit(__NX_FW_ATTACHED, &adapter->state); dev_err(&netdev->dev, "firmware hang detected\n"); peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n" "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" "PEG_NET_4_PC: 0x%x\n", peg_status, NXRD32(adapter, NETXEN_PEG_HALT_STATUS2), NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c), NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c), NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c), NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c), NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c)); if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67) dev_err(&adapter->pdev->dev, "Firmware aborted with error code 0x00006700. " "Device is being reset.\n"); detach: if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && !test_and_set_bit(__NX_RESETTING, &adapter->state)) netxen_schedule_work(adapter, netxen_detach_work, 0); return 1; } static void netxen_fw_poll_work(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, fw_work.work); if (test_bit(__NX_RESETTING, &adapter->state)) goto reschedule; if (test_bit(__NX_DEV_UP, &adapter->state) && !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) { if (!adapter->has_link_events) { netxen_nic_handle_phy_intr(adapter); if (adapter->link_changed) netxen_nic_set_link_parameters(adapter); } } if (netxen_check_health(adapter)) return; reschedule: netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); } static ssize_t netxen_store_bridged_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *net = to_net_dev(dev); struct netxen_adapter *adapter = netdev_priv(net); unsigned long new; int ret = -EINVAL; if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG)) goto err_out; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) goto err_out; if (kstrtoul(buf, 2, &new)) goto err_out; if (!netxen_config_bridged_mode(adapter, !!new)) ret = len; err_out: return ret; } static ssize_t netxen_show_bridged_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *net = to_net_dev(dev); struct netxen_adapter *adapter; int bridged_mode = 0; adapter = netdev_priv(net); if (adapter->capabilities & NX_FW_CAPABILITY_BDG) bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED); return sprintf(buf, "%d\n", bridged_mode); } static const struct device_attribute dev_attr_bridged_mode = { .attr = { .name = "bridged_mode", .mode = 0644 }, .show = netxen_show_bridged_mode, .store = netxen_store_bridged_mode, }; static ssize_t netxen_store_diag_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct netxen_adapter *adapter = dev_get_drvdata(dev); unsigned long new; if (kstrtoul(buf, 2, &new)) return -EINVAL; if (!!new != !!(adapter->flags & 
NETXEN_NIC_DIAG_ENABLED)) adapter->flags ^= NETXEN_NIC_DIAG_ENABLED; return len; } static ssize_t netxen_show_diag_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct netxen_adapter *adapter = dev_get_drvdata(dev); return sprintf(buf, "%d\n", !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); } static const struct device_attribute dev_attr_diag_mode = { .attr = { .name = "diag_mode", .mode = 0644 }, .show = netxen_show_diag_mode, .store = netxen_store_diag_mode, }; static int netxen_sysfs_validate_crb(struct netxen_adapter *adapter, loff_t offset, size_t size) { size_t crb_size = 4; if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) return -EIO; if (offset < NETXEN_PCI_CRBSPACE) { if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return -EINVAL; if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, NETXEN_PCI_CAMQM_2M_END)) crb_size = 8; else return -EINVAL; } if ((size != crb_size) || (offset & (crb_size-1))) return -EINVAL; return 0; } static ssize_t netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u32 data; u64 qmdata; int ret; ret = netxen_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, NETXEN_PCI_CAMQM_2M_END)) { netxen_pci_camqm_read_2M(adapter, offset, &qmdata); memcpy(buf, &qmdata, size); } else { data = NXRD32(adapter, offset); memcpy(buf, &data, size); } return size; } static ssize_t netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u32 data; u64 qmdata; int ret; ret = netxen_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, NETXEN_PCI_CAMQM_2M_END)) { memcpy(&qmdata, buf, size); netxen_pci_camqm_write_2M(adapter, offset, qmdata); } else { memcpy(&data, buf, size); NXWR32(adapter, offset, data); } return size; } static int netxen_sysfs_validate_mem(struct netxen_adapter *adapter, loff_t offset, size_t size) { if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) return -EIO; if ((size != 8) || (offset & 0x7)) return -EIO; return 0; } static ssize_t netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; ret = netxen_sysfs_validate_mem(adapter, offset, size); if (ret != 0) return ret; if (adapter->pci_mem_read(adapter, offset, &data)) return -EIO; memcpy(buf, &data, size); return size; } static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; ret = netxen_sysfs_validate_mem(adapter, offset, size); if (ret != 0) return ret; memcpy(&data, buf, size); if (adapter->pci_mem_write(adapter, offset, data)) return -EIO; return size; } static const struct bin_attribute bin_attr_crb = { .attr = { .name = "crb", .mode = 0644 }, .size = 0, .read = netxen_sysfs_read_crb, .write = netxen_sysfs_write_crb, }; static const struct 
bin_attribute bin_attr_mem = { .attr = { .name = "mem", .mode = 0644 }, .size = 0, .read = netxen_sysfs_read_mem, .write = netxen_sysfs_write_mem, }; static ssize_t netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); struct net_device *netdev = adapter->netdev; struct netxen_dimm_cfg dimm; u8 dw, rows, cols, banks, ranks; u32 val; if (size < attr->size) { netdev_err(netdev, "Invalid size\n"); return -EINVAL; } memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY); /* Checks if DIMM info is valid. */ if (val & NETXEN_DIMM_VALID_FLAG) { netdev_err(netdev, "Invalid DIMM flag\n"); dimm.presence = 0xff; goto out; } rows = NETXEN_DIMM_NUMROWS(val); cols = NETXEN_DIMM_NUMCOLS(val); ranks = NETXEN_DIMM_NUMRANKS(val); banks = NETXEN_DIMM_NUMBANKS(val); dw = NETXEN_DIMM_DATAWIDTH(val); dimm.presence = (val & NETXEN_DIMM_PRESENT); /* Checks if DIMM info is present. */ if (!dimm.presence) { netdev_err(netdev, "DIMM not present\n"); goto out; } dimm.dimm_type = NETXEN_DIMM_TYPE(val); switch (dimm.dimm_type) { case NETXEN_DIMM_TYPE_RDIMM: case NETXEN_DIMM_TYPE_UDIMM: case NETXEN_DIMM_TYPE_SO_DIMM: case NETXEN_DIMM_TYPE_Micro_DIMM: case NETXEN_DIMM_TYPE_Mini_RDIMM: case NETXEN_DIMM_TYPE_Mini_UDIMM: break; default: netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type); goto out; } if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM) dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM; else dimm.mem_type = NETXEN_DIMM_MEMTYPE(val); if (val & NETXEN_DIMM_SIZE) { dimm.size = NETXEN_DIMM_STD_MEM_SIZE; goto out; } if (!rows) { netdev_err(netdev, "Invalid no of rows %x\n", rows); goto out; } if (!cols) { netdev_err(netdev, "Invalid no of columns %x\n", cols); goto out; } if (!banks) { netdev_err(netdev, "Invalid no of banks %x\n", banks); goto out; } ranks += 1; switch (dw) { case 0x0: dw = 32; break; case 0x1: dw = 33; break; case 0x2: dw = 36; break; case 0x3: dw = 64; break; case 0x4: dw = 72; break; case 0x5: dw = 80; break; case 0x6: dw = 128; break; case 0x7: dw = 144; break; default: netdev_err(netdev, "Invalid data-width %x\n", dw); goto out; } dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8; /* Size returned in MB. 
*/ dimm.size = (dimm.size) / 0x100000; out: memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg)); return sizeof(struct netxen_dimm_cfg); } static const struct bin_attribute bin_attr_dimm = { .attr = { .name = "dimm", .mode = 0644 }, .size = sizeof(struct netxen_dimm_cfg), .read = netxen_sysfs_read_dimm, }; static void netxen_create_sysfs_entries(struct netxen_adapter *adapter) { struct device *dev = &adapter->pdev->dev; if (adapter->capabilities & NX_FW_CAPABILITY_BDG) { /* bridged_mode control */ if (device_create_file(dev, &dev_attr_bridged_mode)) { dev_warn(dev, "failed to create bridged_mode sysfs entry\n"); } } } static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter) { struct device *dev = &adapter->pdev->dev; if (adapter->capabilities & NX_FW_CAPABILITY_BDG) device_remove_file(dev, &dev_attr_bridged_mode); } static void netxen_create_diag_entries(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct device *dev; dev = &pdev->dev; if (device_create_file(dev, &dev_attr_diag_mode)) dev_info(dev, "failed to create diag_mode sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_crb)) dev_info(dev, "failed to create crb sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_mem)) dev_info(dev, "failed to create mem sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_dimm)) dev_info(dev, "failed to create dimm sysfs entry\n"); } static void netxen_remove_diag_entries(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct device *dev = &pdev->dev; device_remove_file(dev, &dev_attr_diag_mode); device_remove_bin_file(dev, &bin_attr_crb); device_remove_bin_file(dev, &bin_attr_mem); device_remove_bin_file(dev, &bin_attr_dimm); } #ifdef CONFIG_INET #define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) static int netxen_destip_supported(struct netxen_adapter *adapter) { if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; if (adapter->ahw.cut_through) return 0; return 1; } static void netxen_free_ip_list(struct netxen_adapter *adapter, bool master) { struct nx_ip_list *cur, *tmp_cur; list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) { if (master) { if (cur->master) { netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); list_del(&cur->list); kfree(cur); } } else { netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); list_del(&cur->list); kfree(cur); } } } static bool netxen_list_config_ip(struct netxen_adapter *adapter, struct in_ifaddr *ifa, unsigned long event) { struct net_device *dev; struct nx_ip_list *cur, *tmp_cur; struct list_head *head; bool ret = false; dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; if (dev == NULL) goto out; switch (event) { case NX_IP_UP: list_for_each(head, &adapter->ip_list) { cur = list_entry(head, struct nx_ip_list, list); if (cur->ip_addr == ifa->ifa_address) goto out; } cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC); if (cur == NULL) goto out; if (is_vlan_dev(dev)) dev = vlan_dev_real_dev(dev); cur->master = !!netif_is_bond_master(dev); cur->ip_addr = ifa->ifa_address; list_add_tail(&cur->list, &adapter->ip_list); netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); ret = true; break; case NX_IP_DOWN: list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) { if (cur->ip_addr == ifa->ifa_address) { list_del(&cur->list); kfree(cur); netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN); ret = true; break; } } } out: return ret; } static void netxen_config_indev_addr(struct netxen_adapter *adapter, struct net_device *dev, unsigned long event) { struct in_device *indev; struct in_ifaddr *ifa; if (!netxen_destip_supported(adapter)) return; indev = in_dev_get(dev); if (!indev) return; rcu_read_lock(); in_dev_for_each_ifa_rcu(ifa, indev) { switch (event) { case NETDEV_UP: netxen_list_config_ip(adapter, ifa, NX_IP_UP); break; case NETDEV_DOWN: netxen_list_config_ip(adapter, ifa, NX_IP_DOWN); break; default: break; } } rcu_read_unlock(); in_dev_put(indev); } static void netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) { struct netxen_adapter *adapter = netdev_priv(netdev); struct nx_ip_list *pos, *tmp_pos; unsigned long ip_event; ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; netxen_config_indev_addr(adapter, netdev, event); list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) { netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); } } static inline bool netxen_config_checkdev(struct net_device *dev) { struct netxen_adapter *adapter; if (!is_netxen_netdev(dev)) return false; adapter = netdev_priv(dev); if (!adapter) return false; if (!netxen_destip_supported(adapter)) return false; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return false; return true; } /** * netxen_config_master - configure addresses based on master * @dev: netxen device * @event: netdev event */ static void netxen_config_master(struct net_device *dev, unsigned long event) { struct net_device *master, *slave; struct netxen_adapter *adapter = netdev_priv(dev); rcu_read_lock(); master = netdev_master_upper_dev_get_rcu(dev); /* * This is the case where the netxen nic is being * enslaved and is dev_open()ed in bond_enslave() * Now we should program the bond's (and its vlans') * addresses in the netxen NIC. */ if (master && netif_is_bond_master(master) && !netif_is_bond_slave(dev)) { netxen_config_indev_addr(adapter, master, event); for_each_netdev_rcu(&init_net, slave) if (is_vlan_dev(slave) && vlan_dev_real_dev(slave) == master) netxen_config_indev_addr(adapter, slave, event); } rcu_read_unlock(); /* * This is the case where the netxen nic is being * released and is dev_close()ed in bond_release() * just before IFF_BONDING is stripped. 
*/ if (!master && dev->priv_flags & IFF_BONDING) netxen_free_ip_list(adapter, true); } static int netxen_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net_device *orig_dev = dev; struct net_device *slave; recheck: if (dev == NULL) goto done; if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } if (event == NETDEV_UP || event == NETDEV_DOWN) { /* If this is a bonding device, look for netxen-based slaves*/ if (netif_is_bond_master(dev)) { rcu_read_lock(); for_each_netdev_in_bond_rcu(dev, slave) { if (!netxen_config_checkdev(slave)) continue; adapter = netdev_priv(slave); netxen_config_indev_addr(adapter, orig_dev, event); } rcu_read_unlock(); } else { if (!netxen_config_checkdev(dev)) goto done; adapter = netdev_priv(dev); /* Act only if the actual netxen is the target */ if (orig_dev == dev) netxen_config_master(dev, event); netxen_config_indev_addr(adapter, orig_dev, event); } } done: return NOTIFY_DONE; } static int netxen_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; struct net_device *dev, *slave; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; unsigned long ip_event; dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; recheck: if (dev == NULL) goto done; if (is_vlan_dev(dev)) { dev = vlan_dev_real_dev(dev); goto recheck; } if (event == NETDEV_UP || event == NETDEV_DOWN) { /* If this is a bonding device, look for netxen-based slaves*/ if (netif_is_bond_master(dev)) { rcu_read_lock(); for_each_netdev_in_bond_rcu(dev, slave) { if (!netxen_config_checkdev(slave)) continue; adapter = netdev_priv(slave); netxen_list_config_ip(adapter, ifa, ip_event); } rcu_read_unlock(); } else { if (!netxen_config_checkdev(dev)) goto done; adapter = netdev_priv(dev); netxen_list_config_ip(adapter, ifa, ip_event); } } done: return NOTIFY_DONE; } static struct notifier_block netxen_netdev_cb = { .notifier_call = netxen_netdev_event, }; static struct notifier_block netxen_inetaddr_cb = { .notifier_call = netxen_inetaddr_event, }; #else static void netxen_restore_indev_addr(struct net_device *dev, unsigned long event) { } static void netxen_free_ip_list(struct netxen_adapter *adapter, bool master) { } #endif static const struct pci_error_handlers netxen_err_handler = { .error_detected = netxen_io_error_detected, .slot_reset = netxen_io_slot_reset, }; static SIMPLE_DEV_PM_OPS(netxen_nic_pm_ops, netxen_nic_suspend, netxen_nic_resume); static struct pci_driver netxen_driver = { .name = netxen_nic_driver_name, .id_table = netxen_pci_tbl, .probe = netxen_nic_probe, .remove = netxen_nic_remove, .driver.pm = &netxen_nic_pm_ops, .shutdown = netxen_nic_shutdown, .err_handler = &netxen_err_handler }; static int __init netxen_init_module(void) { printk(KERN_INFO "%s\n", netxen_nic_driver_string); #ifdef CONFIG_INET register_netdevice_notifier(&netxen_netdev_cb); register_inetaddr_notifier(&netxen_inetaddr_cb); #endif return pci_register_driver(&netxen_driver); } module_init(netxen_init_module); static void __exit netxen_exit_module(void) { pci_unregister_driver(&netxen_driver); #ifdef CONFIG_INET unregister_inetaddr_notifier(&netxen_inetaddr_cb); unregister_netdevice_notifier(&netxen_netdev_cb); #endif } module_exit(netxen_exit_module);
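/*
 * Descriptive note (added): module init registers the netdev and inetaddr
 * notifiers (under CONFIG_INET) before registering the PCI driver, and
 * module exit unregisters the PCI driver first and then removes the
 * notifiers in the reverse order of their registration.
 */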
linux-master
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2003 - 2009 NetXen, Inc. * Copyright (C) 2009 - QLogic Corporation. * All rights reserved. */ #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/if_vlan.h> #include <net/checksum.h> #include "netxen_nic.h" #include "netxen_nic_hw.h" struct crb_addr_pair { u32 addr; u32 data; }; #define NETXEN_MAX_CRB_XFORM 60 static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; #define NETXEN_ADDR_ERROR (0xffffffff) #define crb_addr_transform(name) \ crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \ NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20 #define NETXEN_NIC_XDMA_RESET 0x8000ff static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, struct nx_host_rds_ring *rds_ring); static int netxen_p3_has_mn(struct netxen_adapter *adapter); static void crb_addr_transform_setup(void) { crb_addr_transform(XDMA); crb_addr_transform(TIMR); crb_addr_transform(SRE); crb_addr_transform(SQN3); crb_addr_transform(SQN2); crb_addr_transform(SQN1); crb_addr_transform(SQN0); crb_addr_transform(SQS3); crb_addr_transform(SQS2); crb_addr_transform(SQS1); crb_addr_transform(SQS0); crb_addr_transform(RPMX7); crb_addr_transform(RPMX6); crb_addr_transform(RPMX5); crb_addr_transform(RPMX4); crb_addr_transform(RPMX3); crb_addr_transform(RPMX2); crb_addr_transform(RPMX1); crb_addr_transform(RPMX0); crb_addr_transform(ROMUSB); crb_addr_transform(SN); crb_addr_transform(QMN); crb_addr_transform(QMS); crb_addr_transform(PGNI); crb_addr_transform(PGND); crb_addr_transform(PGN3); crb_addr_transform(PGN2); crb_addr_transform(PGN1); crb_addr_transform(PGN0); crb_addr_transform(PGSI); crb_addr_transform(PGSD); crb_addr_transform(PGS3); crb_addr_transform(PGS2); crb_addr_transform(PGS1); crb_addr_transform(PGS0); crb_addr_transform(PS); crb_addr_transform(PH); crb_addr_transform(NIU); crb_addr_transform(I2Q); crb_addr_transform(EG); crb_addr_transform(MN); crb_addr_transform(MS); crb_addr_transform(CAS2); crb_addr_transform(CAS1); crb_addr_transform(CAS0); crb_addr_transform(CAM); crb_addr_transform(C2C1); crb_addr_transform(C2C0); crb_addr_transform(SMB); crb_addr_transform(OCM0); crb_addr_transform(I2C0); } void netxen_release_rx_buffers(struct netxen_adapter *adapter) { struct netxen_recv_context *recv_ctx; struct nx_host_rds_ring *rds_ring; struct netxen_rx_buffer *rx_buf; int i, ring; recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; for (i = 0; i < rds_ring->num_desc; ++i) { rx_buf = &(rds_ring->rx_buf_arr[i]); if (rx_buf->state == NETXEN_BUFFER_FREE) continue; dma_unmap_single(&adapter->pdev->dev, rx_buf->dma, rds_ring->dma_size, DMA_FROM_DEVICE); if (rx_buf->skb != NULL) dev_kfree_skb_any(rx_buf->skb); } } } void netxen_release_tx_buffers(struct netxen_adapter *adapter) { struct netxen_cmd_buffer *cmd_buf; struct netxen_skb_frag *buffrag; int i, j; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; spin_lock_bh(&adapter->tx_clean_lock); cmd_buf = tx_ring->cmd_buf_arr; for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; if (buffrag->dma) { dma_unmap_single(&adapter->pdev->dev, buffrag->dma, buffrag->length, DMA_TO_DEVICE); buffrag->dma = 0ULL; } for (j = 1; j < cmd_buf->frag_count; j++) { buffrag++; if (buffrag->dma) { dma_unmap_page(&adapter->pdev->dev, buffrag->dma, buffrag->length, DMA_TO_DEVICE); buffrag->dma = 0ULL; } } if (cmd_buf->skb) { dev_kfree_skb_any(cmd_buf->skb); cmd_buf->skb = NULL; } cmd_buf++; } 
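	/*
	 * Descriptive note (added): the loop above runs under tx_clean_lock
	 * and walks every command buffer, unmapping the head fragment with
	 * dma_unmap_single(), the page fragments with dma_unmap_page(), and
	 * freeing any pending skb, so the ring is fully released before the
	 * lock is dropped.
	 */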
spin_unlock_bh(&adapter->tx_clean_lock); } void netxen_free_sw_resources(struct netxen_adapter *adapter) { struct netxen_recv_context *recv_ctx; struct nx_host_rds_ring *rds_ring; struct nx_host_tx_ring *tx_ring; int ring; recv_ctx = &adapter->recv_ctx; if (recv_ctx->rds_rings == NULL) goto skip_rds; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; vfree(rds_ring->rx_buf_arr); rds_ring->rx_buf_arr = NULL; } kfree(recv_ctx->rds_rings); skip_rds: if (adapter->tx_ring == NULL) return; tx_ring = adapter->tx_ring; vfree(tx_ring->cmd_buf_arr); kfree(tx_ring); adapter->tx_ring = NULL; } int netxen_alloc_sw_resources(struct netxen_adapter *adapter) { struct netxen_recv_context *recv_ctx; struct nx_host_rds_ring *rds_ring; struct nx_host_sds_ring *sds_ring; struct nx_host_tx_ring *tx_ring; struct netxen_rx_buffer *rx_buf; int ring, i; struct netxen_cmd_buffer *cmd_buf_arr; struct net_device *netdev = adapter->netdev; tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL); if (tx_ring == NULL) return -ENOMEM; adapter->tx_ring = tx_ring; tx_ring->num_desc = adapter->num_txd; tx_ring->txq = netdev_get_tx_queue(netdev, 0); cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); if (cmd_buf_arr == NULL) goto err_out; tx_ring->cmd_buf_arr = cmd_buf_arr; recv_ctx = &adapter->recv_ctx; rds_ring = kcalloc(adapter->max_rds_rings, sizeof(struct nx_host_rds_ring), GFP_KERNEL); if (rds_ring == NULL) goto err_out; recv_ctx->rds_rings = rds_ring; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; switch (ring) { case RCV_RING_NORMAL: rds_ring->num_desc = adapter->num_rxd; if (adapter->ahw.cut_through) { rds_ring->dma_size = NX_CT_DEFAULT_RX_BUF_LEN; rds_ring->skb_size = NX_CT_DEFAULT_RX_BUF_LEN; } else { if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) rds_ring->dma_size = NX_P3_RX_BUF_MAX_LEN; else rds_ring->dma_size = NX_P2_RX_BUF_MAX_LEN; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; } break; case RCV_RING_JUMBO: rds_ring->num_desc = adapter->num_jumbo_rxd; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) rds_ring->dma_size = NX_P3_RX_JUMBO_BUF_MAX_LEN; else rds_ring->dma_size = NX_P2_RX_JUMBO_BUF_MAX_LEN; if (adapter->capabilities & NX_CAP0_HW_LRO) rds_ring->dma_size += NX_LRO_BUFFER_EXTRA; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; break; case RCV_RING_LRO: rds_ring->num_desc = adapter->num_lro_rxd; rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH; rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; break; } rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); if (rds_ring->rx_buf_arr == NULL) /* free whatever was already allocated */ goto err_out; INIT_LIST_HEAD(&rds_ring->free_list); /* * Now go through all of them, set reference handles * and put them in the queues. */ rx_buf = rds_ring->rx_buf_arr; for (i = 0; i < rds_ring->num_desc; i++) { list_add_tail(&rx_buf->list, &rds_ring->free_list); rx_buf->ref_handle = i; rx_buf->state = NETXEN_BUFFER_FREE; rx_buf++; } spin_lock_init(&rds_ring->lock); } for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; sds_ring->irq = adapter->msix_entries[ring].vector; sds_ring->adapter = adapter; sds_ring->num_desc = adapter->num_rxd; for (i = 0; i < NUM_RCV_DESC_RINGS; i++) INIT_LIST_HEAD(&sds_ring->free_list[i]); } return 0; err_out: netxen_free_sw_resources(adapter); return -ENOMEM; } /* * netxen_decode_crb_addr(0 - utility to translate from internal Phantom CRB * address to external PCI CRB address. 
*/ static u32 netxen_decode_crb_addr(u32 addr) { int i; u32 base_addr, offset, pci_base; crb_addr_transform_setup(); pci_base = NETXEN_ADDR_ERROR; base_addr = addr & 0xfff00000; offset = addr & 0x000fffff; for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) { if (crb_addr_xform[i] == base_addr) { pci_base = i << 20; break; } } if (pci_base == NETXEN_ADDR_ERROR) return pci_base; else return pci_base + offset; } #define NETXEN_MAX_ROM_WAIT_USEC 100 static int netxen_wait_rom_done(struct netxen_adapter *adapter) { long timeout = 0; long done = 0; cond_resched(); while (done == 0) { done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS); done &= 2; if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) { dev_err(&adapter->pdev->dev, "Timeout reached waiting for rom done"); return -EIO; } udelay(1); } return 0; } static int do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) { NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); if (netxen_wait_rom_done(adapter)) { printk("Error waiting for rom done\n"); return -EIO; } /* reset abyte_cnt and dummy_byte_cnt */ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); udelay(10); NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA); return 0; } static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr, u8 *bytes, size_t size) { int addridx; int ret = 0; for (addridx = addr; addridx < (addr + size); addridx += 4) { int v; ret = do_rom_fast_read(adapter, addridx, &v); if (ret != 0) break; *(__le32 *)bytes = cpu_to_le32(v); bytes += 4; } return ret; } int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, u8 *bytes, size_t size) { int ret; ret = netxen_rom_lock(adapter); if (ret < 0) return ret; ret = do_rom_fast_read_words(adapter, addr, bytes, size); netxen_rom_unlock(adapter); return ret; } int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) { int ret; if (netxen_rom_lock(adapter) != 0) return -EIO; ret = do_rom_fast_read(adapter, addr, valp); netxen_rom_unlock(adapter); return ret; } #define NETXEN_BOARDTYPE 0x4008 #define NETXEN_BOARDNUM 0x400c #define NETXEN_CHIPNUM 0x4010 int netxen_pinit_from_rom(struct netxen_adapter *adapter) { int addr, val; int i, n, init_delay = 0; struct crb_addr_pair *buf; unsigned offset; u32 off; /* resetall */ netxen_rom_lock(adapter); NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff); netxen_rom_unlock(adapter); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (netxen_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || netxen_rom_fast_read(adapter, 4, &n) != 0) { printk(KERN_ERR "%s: ERROR Reading crb_init area: " "n: %08x\n", netxen_nic_driver_name, n); return -EIO; } offset = n & 0xffffU; n = (n >> 16) & 0xffffU; } else { if (netxen_rom_fast_read(adapter, 0, &n) != 0 || !(n & 0x80000000)) { printk(KERN_ERR "%s: ERROR Reading crb_init area: " "n: %08x\n", netxen_nic_driver_name, n); return -EIO; } offset = 1; n &= ~0x80000000; } if (n >= 1024) { printk(KERN_ERR "%s:n=0x%x Error! 
NetXen card flash not" " initialized.\n", __func__, n); return -EIO; } buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) return -ENOMEM; for (i = 0; i < n; i++) { if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { kfree(buf); return -EIO; } buf[i].addr = addr; buf[i].data = val; } for (i = 0; i < n; i++) { off = netxen_decode_crb_addr(buf[i].addr); if (off == NETXEN_ADDR_ERROR) { printk(KERN_ERR"CRB init value out of range %x\n", buf[i].addr); continue; } off += NETXEN_PCI_CRBSPACE; if (off & 1) continue; /* skipping cold reboot MAGIC */ if (off == NETXEN_CAM_RAM(0x1fc)) continue; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (off == (NETXEN_CRB_I2C0 + 0x1c)) continue; /* do not reset PCI */ if (off == (ROMUSB_GLB + 0xbc)) continue; if (off == (ROMUSB_GLB + 0xa8)) continue; if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ continue; if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ continue; if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ continue; if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET) continue; if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) && !NX_IS_REVISION_P3P(adapter->ahw.revision_id)) buf[i].data = 0x1020; /* skip the function enable register */ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) continue; if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2)) continue; if ((off & 0x0ff00000) == NETXEN_CRB_SMB) continue; } init_delay = 1; /* After writing this register, HW needs time for CRB */ /* to quiet down (else crb_window returns 0xffffffff) */ if (off == NETXEN_ROMUSB_GLB_SW_RESET) { init_delay = 1000; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { /* hold xdma in reset also */ buf[i].data = NETXEN_NIC_XDMA_RESET; buf[i].data = 0x8000ff; } } NXWR32(adapter, off, buf[i].data); msleep(init_delay); } kfree(buf); /* disable_peg_cache_all */ /* unreset_net_cache */ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f)); } /* p2dn replyCount */ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); /* disable_peg_cache 0 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8); /* disable_peg_cache 1 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8); /* peg_clr_all */ /* peg_clr 0 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0); NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0); /* peg_clr 1 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0); NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0); /* peg_clr 2 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0); NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0); /* peg_clr 3 */ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0); NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0); return 0; } static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section) { uint32_t i; struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; __le32 entries = cpu_to_le32(directory->num_entries); for (i = 0; i < entries; i++) { __le32 offs = cpu_to_le32(directory->findex) + (i * cpu_to_le32(directory->entry_size)); __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); if (tab_type == section) return (struct uni_table_desc *) &unirom[offs]; } return NULL; } #define QLCNIC_FILEHEADER_SIZE (14 * 4) static int netxen_nic_validate_header(struct netxen_adapter *adapter) { const u8 *unirom = adapter->fw->data; struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; u32 fw_file_size = adapter->fw->size; u32 tab_size; __le32 
entries; __le32 entry_size; if (fw_file_size < QLCNIC_FILEHEADER_SIZE) return -EINVAL; entries = cpu_to_le32(directory->num_entries); entry_size = cpu_to_le32(directory->entry_size); tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); if (fw_file_size < tab_size) return -EINVAL; return 0; } static int netxen_nic_validate_bootld(struct netxen_adapter *adapter) { struct uni_table_desc *tab_desc; struct uni_data_desc *descr; const u8 *unirom = adapter->fw->data; __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + NX_UNI_BOOTLD_IDX_OFF)); u32 offs; u32 tab_size; u32 data_size; tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD); if (!tab_desc) return -EINVAL; tab_size = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); if (adapter->fw->size < tab_size) return -EINVAL; offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx)); descr = (struct uni_data_desc *)&unirom[offs]; data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); if (adapter->fw->size < data_size) return -EINVAL; return 0; } static int netxen_nic_validate_fw(struct netxen_adapter *adapter) { struct uni_table_desc *tab_desc; struct uni_data_desc *descr; const u8 *unirom = adapter->fw->data; __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + NX_UNI_FIRMWARE_IDX_OFF)); u32 offs; u32 tab_size; u32 data_size; tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW); if (!tab_desc) return -EINVAL; tab_size = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); if (adapter->fw->size < tab_size) return -EINVAL; offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * (idx)); descr = (struct uni_data_desc *)&unirom[offs]; data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); if (adapter->fw->size < data_size) return -EINVAL; return 0; } static int netxen_nic_validate_product_offs(struct netxen_adapter *adapter) { struct uni_table_desc *ptab_descr; const u8 *unirom = adapter->fw->data; int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 1 : netxen_p3_has_mn(adapter); __le32 entries; __le32 entry_size; u32 tab_size; u32 i; ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); if (ptab_descr == NULL) return -EINVAL; entries = cpu_to_le32(ptab_descr->num_entries); entry_size = cpu_to_le32(ptab_descr->entry_size); tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); if (adapter->fw->size < tab_size) return -EINVAL; nomn: for (i = 0; i < entries; i++) { __le32 flags, file_chiprev, offs; u8 chiprev = adapter->ahw.revision_id; uint32_t flagbit; offs = cpu_to_le32(ptab_descr->findex) + (i * cpu_to_le32(ptab_descr->entry_size)); flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF)); file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_CHIP_REV_OFF)); flagbit = mn_present ? 
1 : 2; if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) { adapter->file_prd_off = offs; return 0; } } if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) { mn_present = 0; goto nomn; } return -EINVAL; } static int netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter) { if (netxen_nic_validate_header(adapter)) { dev_err(&adapter->pdev->dev, "unified image: header validation failed\n"); return -EINVAL; } if (netxen_nic_validate_product_offs(adapter)) { dev_err(&adapter->pdev->dev, "unified image: product validation failed\n"); return -EINVAL; } if (netxen_nic_validate_bootld(adapter)) { dev_err(&adapter->pdev->dev, "unified image: bootld validation failed\n"); return -EINVAL; } if (netxen_nic_validate_fw(adapter)) { dev_err(&adapter->pdev->dev, "unified image: firmware validation failed\n"); return -EINVAL; } return 0; } static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, u32 section, u32 idx_offset) { const u8 *unirom = adapter->fw->data; int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + idx_offset)); struct uni_table_desc *tab_desc; __le32 offs; tab_desc = nx_get_table_desc(unirom, section); if (tab_desc == NULL) return NULL; offs = cpu_to_le32(tab_desc->findex) + (cpu_to_le32(tab_desc->entry_size) * idx); return (struct uni_data_desc *)&unirom[offs]; } static u8 * nx_get_bootld_offs(struct netxen_adapter *adapter) { u32 offs = NETXEN_BOOTLD_START; if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) offs = cpu_to_le32((nx_get_data_desc(adapter, NX_UNI_DIR_SECT_BOOTLD, NX_UNI_BOOTLD_IDX_OFF))->findex); return (u8 *)&adapter->fw->data[offs]; } static u8 * nx_get_fw_offs(struct netxen_adapter *adapter) { u32 offs = NETXEN_IMAGE_START; if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) offs = cpu_to_le32((nx_get_data_desc(adapter, NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF))->findex); return (u8 *)&adapter->fw->data[offs]; } static __le32 nx_get_fw_size(struct netxen_adapter *adapter) { if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) return cpu_to_le32((nx_get_data_desc(adapter, NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF))->size); else return cpu_to_le32( *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]); } static __le32 nx_get_fw_version(struct netxen_adapter *adapter) { struct uni_data_desc *fw_data_desc; const struct firmware *fw = adapter->fw; __le32 major, minor, sub; const u8 *ver_str; int i, ret = 0; if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { fw_data_desc = nx_get_data_desc(adapter, NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF); ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + cpu_to_le32(fw_data_desc->size) - 17; for (i = 0; i < 12; i++) { if (!strncmp(&ver_str[i], "REV=", 4)) { ret = sscanf(&ver_str[i+4], "%u.%u.%u ", &major, &minor, &sub); break; } } if (ret != 3) return 0; return major + (minor << 8) + (sub << 16); } else return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); } static __le32 nx_get_bios_version(struct netxen_adapter *adapter) { const struct firmware *fw = adapter->fw; __le32 bios_ver, prd_off = adapter->file_prd_off; if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) + NX_UNI_BIOS_VERSION_OFF)); return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); } else return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); } int netxen_need_fw_reset(struct netxen_adapter *adapter) { u32 count, old_count; u32 val, version, major, minor, build; int i, timeout; u8 fw_type; /* NX2031 firmware doesn't support heartbit */ if 
(NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 1; if (adapter->need_fw_reset) return 1; /* last attempt had failed */ if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) return 1; old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); for (i = 0; i < 10; i++) { timeout = msleep_interruptible(200); if (timeout) { NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); return -EINTR; } count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); if (count != old_count) break; } /* firmware is dead */ if (count == old_count) return 1; /* check if we have a newer or different firmware file */ if (adapter->fw) { val = nx_get_fw_version(adapter); version = NETXEN_DECODE_VERSION(val); major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); if (version > NETXEN_VERSION_CODE(major, minor, build)) return 1; if (version == NETXEN_VERSION_CODE(major, minor, build) && adapter->fw_type != NX_UNIFIED_ROMIMAGE) { val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); fw_type = (val & 0x4) ? NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE; if (adapter->fw_type != fw_type) return 1; } } return 0; } #define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505) int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter) { u32 flash_fw_ver, min_fw_ver; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; if (netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { dev_err(&adapter->pdev->dev, "Unable to read flash fw version\n"); return -EIO; } flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); min_fw_ver = NETXEN_MIN_P3_FW_SUPP; if (flash_fw_ver >= min_fw_ver) return 0; dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported [4.0.505]. 
Please update firmware on flash\n", _major(flash_fw_ver), _minor(flash_fw_ver), _build(flash_fw_ver)); return -EINVAL; } static char *fw_name[] = { NX_P2_MN_ROMIMAGE_NAME, NX_P3_CT_ROMIMAGE_NAME, NX_P3_MN_ROMIMAGE_NAME, NX_UNIFIED_ROMIMAGE_NAME, NX_FLASH_ROMIMAGE_NAME, }; int netxen_load_firmware(struct netxen_adapter *adapter) { u64 *ptr64; u32 i, flashaddr, size; const struct firmware *fw = adapter->fw; struct pci_dev *pdev = adapter->pdev; dev_info(&pdev->dev, "loading firmware from %s\n", fw_name[adapter->fw_type]); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1); if (fw) { __le64 data; size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; ptr64 = (u64 *)nx_get_bootld_offs(adapter); flashaddr = NETXEN_BOOTLD_START; for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if (adapter->pci_mem_write(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } size = (__force u32)nx_get_fw_size(adapter) / 8; ptr64 = (u64 *)nx_get_fw_offs(adapter); flashaddr = NETXEN_IMAGE_START; for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if (adapter->pci_mem_write(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } size = (__force u32)nx_get_fw_size(adapter) % 8; if (size) { data = cpu_to_le64(ptr64[i]); if (adapter->pci_mem_write(adapter, flashaddr, data)) return -EIO; } } else { u64 data; u32 hi, lo; size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; flashaddr = NETXEN_BOOTLD_START; for (i = 0; i < size; i++) { if (netxen_rom_fast_read(adapter, flashaddr, (int *)&lo) != 0) return -EIO; if (netxen_rom_fast_read(adapter, flashaddr + 4, (int *)&hi) != 0) return -EIO; /* hi, lo are already in host endian byteorder */ data = (((u64)hi << 32) | lo); if (adapter->pci_mem_write(adapter, flashaddr, data)) return -EIO; flashaddr += 8; } } msleep(1); if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020); NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e); } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); else { NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0); } return 0; } static int netxen_validate_firmware(struct netxen_adapter *adapter) { __le32 val; __le32 flash_fw_ver; u32 file_fw_ver, min_ver, bios; struct pci_dev *pdev = adapter->pdev; const struct firmware *fw = adapter->fw; u8 fw_type = adapter->fw_type; u32 crbinit_fix_fw; if (fw_type == NX_UNIFIED_ROMIMAGE) { if (netxen_nic_validate_unified_romimage(adapter)) return -EINVAL; } else { val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); if ((__force u32)val != NETXEN_BDINFO_MAGIC) return -EINVAL; if (fw->size < NX_FW_MIN_SIZE) return -EINVAL; } val = nx_get_fw_version(adapter); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) min_ver = NETXEN_MIN_P3_FW_SUPP; else min_ver = NETXEN_VERSION_CODE(3, 4, 216); file_fw_ver = NETXEN_DECODE_VERSION(val); if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) || (file_fw_ver < min_ver)) { dev_err(&pdev->dev, "%s: firmware version %d.%d.%d unsupported\n", fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver), _build(file_fw_ver)); return -EINVAL; } val = nx_get_bios_version(adapter); if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios)) return -EIO; if ((__force u32)val != bios) { dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", fw_name[fw_type]); return -EINVAL; } if (netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int 
*)&flash_fw_ver)) { dev_err(&pdev->dev, "Unable to read flash fw version\n"); return -EIO; } flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); /* New fw from file is not allowed, if fw on flash is < 4.0.554 */ crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554); if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw && NX_IS_REVISION_P3(adapter->ahw.revision_id)) { dev_err(&pdev->dev, "Incompatibility detected between driver " "and firmware version on flash. This configuration " "is not recommended. Please update the firmware on " "flash immediately\n"); return -EINVAL; } /* check if flashed firmware is newer only for no-mn and P2 case*/ if (!netxen_p3_has_mn(adapter) || NX_IS_REVISION_P2(adapter->ahw.revision_id)) { if (flash_fw_ver > file_fw_ver) { dev_info(&pdev->dev, "%s: firmware is older than flash\n", fw_name[fw_type]); return -EINVAL; } } NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); return 0; } static void nx_get_next_fwtype(struct netxen_adapter *adapter) { u8 fw_type; switch (adapter->fw_type) { case NX_UNKNOWN_ROMIMAGE: fw_type = NX_UNIFIED_ROMIMAGE; break; case NX_UNIFIED_ROMIMAGE: if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) fw_type = NX_FLASH_ROMIMAGE; else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) fw_type = NX_P2_MN_ROMIMAGE; else if (netxen_p3_has_mn(adapter)) fw_type = NX_P3_MN_ROMIMAGE; else fw_type = NX_P3_CT_ROMIMAGE; break; case NX_P3_MN_ROMIMAGE: fw_type = NX_P3_CT_ROMIMAGE; break; case NX_P2_MN_ROMIMAGE: case NX_P3_CT_ROMIMAGE: default: fw_type = NX_FLASH_ROMIMAGE; break; } adapter->fw_type = fw_type; } static int netxen_p3_has_mn(struct netxen_adapter *adapter) { u32 capability, flashed_ver; capability = 0; /* NX2031 always had MN */ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 1; netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int *)&flashed_ver); flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); if (capability & NX_PEG_TUNE_MN_PRESENT) return 1; } return 0; } void netxen_request_firmware(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int rc = 0; adapter->fw_type = NX_UNKNOWN_ROMIMAGE; next: nx_get_next_fwtype(adapter); if (adapter->fw_type == NX_FLASH_ROMIMAGE) { adapter->fw = NULL; } else { rc = request_firmware(&adapter->fw, fw_name[adapter->fw_type], &pdev->dev); if (rc != 0) goto next; rc = netxen_validate_firmware(adapter); if (rc != 0) { release_firmware(adapter->fw); msleep(1); goto next; } } } void netxen_release_firmware(struct netxen_adapter *adapter) { release_firmware(adapter->fw); adapter->fw = NULL; } int netxen_init_dummy_dma(struct netxen_adapter *adapter) { u64 addr; u32 hi, lo; if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; adapter->dummy_dma.addr = dma_alloc_coherent(&adapter->pdev->dev, NETXEN_HOST_DUMMY_DMA_SIZE, &adapter->dummy_dma.phys_addr, GFP_KERNEL); if (adapter->dummy_dma.addr == NULL) { dev_err(&adapter->pdev->dev, "ERROR: Could not allocate dummy DMA memory\n"); return -ENOMEM; } addr = (uint64_t) adapter->dummy_dma.phys_addr; hi = (addr >> 32) & 0xffffffff; lo = addr & 0xffffffff; NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi); NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo); return 0; } /* * NetXen DMA watchdog control: * * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive * Bit 1 : disable_request => 1 req disable dma watchdog * Bit 2 : enable_request => 1 req enable dma watchdog * Bit 3-31 : unused */ void 
netxen_free_dummy_dma(struct netxen_adapter *adapter) { int i = 100; u32 ctrl; if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) return; if (!adapter->dummy_dma.addr) return; ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); if ((ctrl & 0x1) != 0) { NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2)); while ((ctrl & 0x1) != 0) { msleep(50); ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); if (--i == 0) break; } } if (i) { dma_free_coherent(&adapter->pdev->dev, NETXEN_HOST_DUMMY_DMA_SIZE, adapter->dummy_dma.addr, adapter->dummy_dma.phys_addr); adapter->dummy_dma.addr = NULL; } else dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n"); } int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) { u32 val = 0; int retries = 60; if (pegtune_val) return 0; do { val = NXRD32(adapter, CRB_CMDPEG_STATE); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return 0; case PHAN_INITIALIZE_FAILED: goto out_err; default: break; } msleep(500); } while (--retries); NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); out_err: dev_warn(&adapter->pdev->dev, "firmware init failed\n"); return -EIO; } static int netxen_receive_peg_ready(struct netxen_adapter *adapter) { u32 val = 0; int retries = 2000; do { val = NXRD32(adapter, CRB_RCVPEG_STATE); if (val == PHAN_PEG_RCV_INITIALIZED) return 0; msleep(10); } while (--retries); pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val); return -EIO; } int netxen_init_firmware(struct netxen_adapter *adapter) { int err; err = netxen_receive_peg_ready(adapter); if (err) return err; NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT); NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE); NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC); return err; } static void netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg) { u32 cable_OUI; u16 cable_len; u16 link_speed; u8 link_status, module, duplex, autoneg; struct net_device *netdev = adapter->netdev; adapter->has_link_events = 1; cable_OUI = msg->body[1] & 0xffffffff; cable_len = (msg->body[1] >> 32) & 0xffff; link_speed = (msg->body[1] >> 48) & 0xffff; link_status = msg->body[2] & 0xff; duplex = (msg->body[2] >> 16) & 0xff; autoneg = (msg->body[2] >> 24) & 0xff; module = (msg->body[2] >> 8) & 0xff; if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) { printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n", netdev->name, cable_OUI, cable_len); } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) { printk(KERN_INFO "%s: unsupported cable length %d\n", netdev->name, cable_len); } /* update link parameters */ if (duplex == LINKEVENT_FULL_DUPLEX) adapter->link_duplex = DUPLEX_FULL; else adapter->link_duplex = DUPLEX_HALF; adapter->module_type = module; adapter->link_autoneg = autoneg; adapter->link_speed = link_speed; netxen_advert_link_change(adapter, link_status); } static void netxen_handle_fw_message(int desc_cnt, int index, struct nx_host_sds_ring *sds_ring) { nx_fw_msg_t msg; struct status_desc *desc; int i = 0, opcode; while (desc_cnt > 0 && i < 8) { desc = &sds_ring->desc_head[index]; msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); index = get_next_index(index, sds_ring->num_desc); desc_cnt--; } opcode = netxen_get_nic_msg_opcode(msg.body[0]); switch (opcode) { case 
NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: netxen_handle_linkevent(sds_ring->adapter, &msg); break; default: break; } } static int netxen_alloc_rx_skb(struct netxen_adapter *adapter, struct nx_host_rds_ring *rds_ring, struct netxen_rx_buffer *buffer) { struct sk_buff *skb; dma_addr_t dma; struct pci_dev *pdev = adapter->pdev; buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size); if (!buffer->skb) return 1; skb = buffer->skb; if (!adapter->ahw.cut_through) skb_reserve(skb, 2); dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, dma)) { dev_kfree_skb_any(skb); buffer->skb = NULL; return 1; } buffer->skb = skb; buffer->dma = dma; buffer->state = NETXEN_BUFFER_BUSY; return 0; } static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) { struct netxen_rx_buffer *buffer; struct sk_buff *skb; buffer = &rds_ring->rx_buf_arr[index]; dma_unmap_single(&adapter->pdev->dev, buffer->dma, rds_ring->dma_size, DMA_FROM_DEVICE); skb = buffer->skb; if (!skb) goto no_skb; if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && cksum == STATUS_CKSUM_OK)) { adapter->stats.csummed++; skb->ip_summed = CHECKSUM_UNNECESSARY; } else skb->ip_summed = CHECKSUM_NONE; buffer->skb = NULL; no_skb: buffer->state = NETXEN_BUFFER_FREE; return skb; } static struct netxen_rx_buffer * netxen_process_rcv(struct netxen_adapter *adapter, struct nx_host_sds_ring *sds_ring, int ring, u64 sts_data0) { struct net_device *netdev = adapter->netdev; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; struct netxen_rx_buffer *buffer; struct sk_buff *skb; struct nx_host_rds_ring *rds_ring; int index, length, cksum, pkt_offset; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = netxen_get_sts_refhandle(sts_data0); if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; length = netxen_get_sts_totallength(sts_data0); cksum = netxen_get_sts_status(sts_data0); pkt_offset = netxen_get_sts_pkt_offset(sts_data0); skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); if (!skb) return buffer; if (length > rds_ring->skb_size) skb_put(skb, rds_ring->skb_size); else skb_put(skb, length); if (pkt_offset) skb_pull(skb, pkt_offset); skb->protocol = eth_type_trans(skb, netdev); napi_gro_receive(&sds_ring->napi, skb); adapter->stats.rx_pkts++; adapter->stats.rxbytes += length; return buffer; } #define TCP_HDR_SIZE 20 #define TCP_TS_OPTION_SIZE 12 #define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE) static struct netxen_rx_buffer * netxen_process_lro(struct netxen_adapter *adapter, struct nx_host_sds_ring *sds_ring, int ring, u64 sts_data0, u64 sts_data1) { struct net_device *netdev = adapter->netdev; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; struct netxen_rx_buffer *buffer; struct sk_buff *skb; struct nx_host_rds_ring *rds_ring; struct iphdr *iph; struct tcphdr *th; bool push, timestamp; int l2_hdr_offset, l4_hdr_offset; int index; u16 lro_length, length, data_offset; u32 seq_number; u8 vhdr_len = 0; if (unlikely(ring >= adapter->max_rds_rings)) return NULL; rds_ring = &recv_ctx->rds_rings[ring]; index = netxen_get_lro_sts_refhandle(sts_data0); if (unlikely(index >= rds_ring->num_desc)) return NULL; buffer = &rds_ring->rx_buf_arr[index]; timestamp = netxen_get_lro_sts_timestamp(sts_data0); lro_length = netxen_get_lro_sts_length(sts_data0); l2_hdr_offset = 
netxen_get_lro_sts_l2_hdr_offset(sts_data0); l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0); push = netxen_get_lro_sts_push_flag(sts_data0); seq_number = netxen_get_lro_sts_seq_number(sts_data1); skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); if (!skb) return buffer; if (timestamp) data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE; else data_offset = l4_hdr_offset + TCP_HDR_SIZE; skb_put(skb, lro_length + data_offset); skb_pull(skb, l2_hdr_offset); skb->protocol = eth_type_trans(skb, netdev); if (skb->protocol == htons(ETH_P_8021Q)) vhdr_len = VLAN_HLEN; iph = (struct iphdr *)(skb->data + vhdr_len); th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2)); length = (iph->ihl << 2) + (th->doff << 2) + lro_length; csum_replace2(&iph->check, iph->tot_len, htons(length)); iph->tot_len = htons(length); th->psh = push; th->seq = htonl(seq_number); length = skb->len; if (adapter->flags & NETXEN_FW_MSS_CAP) skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1); netif_receive_skb(skb); adapter->stats.lro_pkts++; adapter->stats.rxbytes += length; return buffer; } #define netxen_merge_rx_buffers(list, head) \ do { list_splice_tail_init(list, head); } while (0); int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max) { struct netxen_adapter *adapter = sds_ring->adapter; struct list_head *cur; struct status_desc *desc; struct netxen_rx_buffer *rxbuf; u32 consumer = sds_ring->consumer; int count = 0; u64 sts_data0, sts_data1; int opcode, ring = 0, desc_cnt; while (count < max) { desc = &sds_ring->desc_head[consumer]; sts_data0 = le64_to_cpu(desc->status_desc_data[0]); if (!(sts_data0 & STATUS_OWNER_HOST)) break; desc_cnt = netxen_get_sts_desc_cnt(sts_data0); opcode = netxen_get_sts_opcode(sts_data0); switch (opcode) { case NETXEN_NIC_RXPKT_DESC: case NETXEN_OLD_RXPKT_DESC: case NETXEN_NIC_SYN_OFFLOAD: ring = netxen_get_sts_type(sts_data0); rxbuf = netxen_process_rcv(adapter, sds_ring, ring, sts_data0); break; case NETXEN_NIC_LRO_DESC: ring = netxen_get_lro_sts_type(sts_data0); sts_data1 = le64_to_cpu(desc->status_desc_data[1]); rxbuf = netxen_process_lro(adapter, sds_ring, ring, sts_data0, sts_data1); break; case NETXEN_NIC_RESPONSE_DESC: netxen_handle_fw_message(desc_cnt, consumer, sds_ring); goto skip; default: goto skip; } WARN_ON(desc_cnt > 1); if (rxbuf) list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); skip: for (; desc_cnt > 0; desc_cnt--) { desc = &sds_ring->desc_head[consumer]; desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); consumer = get_next_index(consumer, sds_ring->num_desc); } count++; } for (ring = 0; ring < adapter->max_rds_rings; ring++) { struct nx_host_rds_ring *rds_ring = &adapter->recv_ctx.rds_rings[ring]; if (!list_empty(&sds_ring->free_list[ring])) { list_for_each(cur, &sds_ring->free_list[ring]) { rxbuf = list_entry(cur, struct netxen_rx_buffer, list); netxen_alloc_rx_skb(adapter, rds_ring, rxbuf); } spin_lock(&rds_ring->lock); netxen_merge_rx_buffers(&sds_ring->free_list[ring], &rds_ring->free_list); spin_unlock(&rds_ring->lock); } netxen_post_rx_buffers_nodb(adapter, rds_ring); } if (count) { sds_ring->consumer = consumer; NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer); } return count; } /* Process Command status ring */ int netxen_process_cmd_ring(struct netxen_adapter *adapter) { u32 sw_consumer, hw_consumer; int count = 0, i; struct netxen_cmd_buffer *buffer; struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; struct netxen_skb_frag *frag; int done = 
0; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; if (!spin_trylock_bh(&adapter->tx_clean_lock)) return 1; sw_consumer = tx_ring->sw_consumer; hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); while (sw_consumer != hw_consumer) { buffer = &tx_ring->cmd_buf_arr[sw_consumer]; if (buffer->skb) { frag = &buffer->frag_array[0]; dma_unmap_single(&pdev->dev, frag->dma, frag->length, DMA_TO_DEVICE); frag->dma = 0ULL; for (i = 1; i < buffer->frag_count; i++) { frag++; /* Get the next frag */ dma_unmap_page(&pdev->dev, frag->dma, frag->length, DMA_TO_DEVICE); frag->dma = 0ULL; } adapter->stats.xmitfinished++; dev_kfree_skb_any(buffer->skb); buffer->skb = NULL; } sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); if (++count >= MAX_STATUS_HANDLE) break; } tx_ring->sw_consumer = sw_consumer; if (count && netif_running(netdev)) { smp_mb(); if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) netif_wake_queue(netdev); adapter->tx_timeo_cnt = 0; } /* * If everything is freed up to consumer then check if the ring is full * If the ring is full then check if more needs to be freed and * schedule the call back again. * * This happens when there are 2 CPUs. One could be freeing and the * other filling it. If the ring is full when we get out of here and * the card has already interrupted the host then the host can miss the * interrupt. * * There is still a possible race condition and the host could miss an * interrupt. The card has to take care of this. */ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); done = (sw_consumer == hw_consumer); spin_unlock_bh(&adapter->tx_clean_lock); return done; } void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, struct nx_host_rds_ring *rds_ring) { struct rcv_desc *pdesc; struct netxen_rx_buffer *buffer; int producer, count = 0; netxen_ctx_msg msg = 0; struct list_head *head; producer = rds_ring->producer; head = &rds_ring->free_list; while (!list_empty(head)) { buffer = list_entry(head->next, struct netxen_rx_buffer, list); if (!buffer->skb) { if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) break; } count++; list_del(&buffer->list); /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; pdesc->addr_buffer = cpu_to_le64(buffer->dma); pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); producer = get_next_index(producer, rds_ring->num_desc); } if (count) { rds_ring->producer = producer; NXWRIO(adapter, rds_ring->crb_rcv_producer, (producer-1) & (rds_ring->num_desc-1)); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { /* * Write a doorbell msg to tell phanmon of change in * receive ring producer * Only for firmware version < 4.0.0 */ netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); netxen_set_msg_privid(msg); netxen_set_msg_count(msg, ((producer - 1) & (rds_ring->num_desc - 1))); netxen_set_msg_ctxid(msg, adapter->portnum); netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); NXWRIO(adapter, DB_NORMALIZE(adapter, NETXEN_RCV_PRODUCER_OFFSET), msg); } } } static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, struct nx_host_rds_ring *rds_ring) { struct rcv_desc *pdesc; struct netxen_rx_buffer *buffer; int producer, count = 0; struct list_head *head; if (!spin_trylock(&rds_ring->lock)) return; producer = rds_ring->producer; head = &rds_ring->free_list; while (!list_empty(head)) { buffer = list_entry(head->next, struct netxen_rx_buffer, list); if (!buffer->skb) { if 
(netxen_alloc_rx_skb(adapter, rds_ring, buffer)) break; } count++; list_del(&buffer->list); /* make a rcv descriptor */ pdesc = &rds_ring->desc_head[producer]; pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); pdesc->addr_buffer = cpu_to_le64(buffer->dma); producer = get_next_index(producer, rds_ring->num_desc); } if (count) { rds_ring->producer = producer; NXWRIO(adapter, rds_ring->crb_rcv_producer, (producer - 1) & (rds_ring->num_desc - 1)); } spin_unlock(&rds_ring->lock); } void netxen_nic_clear_stats(struct netxen_adapter *adapter) { memset(&adapter->stats, 0, sizeof(adapter->stats)); }
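/*
 * RX fast-path summary for this file: netxen_process_rcv_ring() walks
 * the status ring up to the NAPI budget, dispatches each completion to
 * netxen_process_rcv()/netxen_process_lro(), parks the consumed buffers
 * on per-ring free lists, refills the RDS rings through
 * netxen_post_rx_buffers_nodb() and finally writes the updated consumer
 * index back to the adapter.
 */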
linux-master
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2003 - 2009 NetXen, Inc. * Copyright (C) 2009 - QLogic Corporation. * All rights reserved. */ #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/slab.h> #include "netxen_nic.h" #include "netxen_nic_hw.h" #include <net/ip.h> #define MASK(n) ((1ULL<<(n))-1) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) #define MS_WIN(addr) (addr & 0x0ffc0000) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) #define CRB_INDIRECT_2M (0x1e0000UL) static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, void __iomem *addr, u32 data); static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, void __iomem *addr); #define PCI_OFFSET_FIRST_RANGE(adapter, off) \ ((adapter)->ahw.pci_base0 + (off)) #define PCI_OFFSET_SECOND_RANGE(adapter, off) \ ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START) #define PCI_OFFSET_THIRD_RANGE(adapter, off) \ ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START) static void __iomem *pci_base_offset(struct netxen_adapter *adapter, unsigned long off) { if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END)) return PCI_OFFSET_FIRST_RANGE(adapter, off); if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END)) return PCI_OFFSET_SECOND_RANGE(adapter, off); if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END)) return PCI_OFFSET_THIRD_RANGE(adapter, off); return NULL; } static crb_128M_2M_block_map_t crb_128M_2M_map[64] __cacheline_aligned_in_smp = { {{{0, 0, 0, 0} } }, /* 0: PCI */ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ {{{0, 0, 0, 0} } }, /* 3: */ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 
0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ {{{0, 0, 0, 0} } }, /* 23: */ {{{0, 0, 0, 0} } }, /* 24: */ {{{0, 0, 0, 0} } }, /* 25: */ {{{0, 0, 0, 0} } }, /* 26: */ {{{0, 0, 0, 0} } }, /* 27: */ {{{0, 0, 0, 0} } }, /* 28: */ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ {{{0} } }, /* 32: PCI */ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ {{{0} } }, /* 35: */ {{{0} } }, /* 36: */ {{{0} } }, /* 37: */ {{{0} } }, /* 38: */ {{{0} } }, /* 39: */ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 
*/ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ {{{0} } }, /* 52: */ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ {{{0} } }, /* 59: I2C0 */ {{{0} } }, /* 60: I2C1 */ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ }; /* * top 12 bits of crb internal address (hub, agent) */ static unsigned crb_hub_agt[64] = { 0, NETXEN_HW_CRB_HUB_AGT_ADR_PS, NETXEN_HW_CRB_HUB_AGT_ADR_MN, NETXEN_HW_CRB_HUB_AGT_ADR_MS, 0, NETXEN_HW_CRB_HUB_AGT_ADR_SRE, NETXEN_HW_CRB_HUB_AGT_ADR_NIU, NETXEN_HW_CRB_HUB_AGT_ADR_QMN, NETXEN_HW_CRB_HUB_AGT_ADR_SQN0, NETXEN_HW_CRB_HUB_AGT_ADR_SQN1, NETXEN_HW_CRB_HUB_AGT_ADR_SQN2, NETXEN_HW_CRB_HUB_AGT_ADR_SQN3, NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, NETXEN_HW_CRB_HUB_AGT_ADR_PGN4, NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, NETXEN_HW_CRB_HUB_AGT_ADR_PGN0, NETXEN_HW_CRB_HUB_AGT_ADR_PGN1, NETXEN_HW_CRB_HUB_AGT_ADR_PGN2, NETXEN_HW_CRB_HUB_AGT_ADR_PGN3, NETXEN_HW_CRB_HUB_AGT_ADR_PGND, NETXEN_HW_CRB_HUB_AGT_ADR_PGNI, NETXEN_HW_CRB_HUB_AGT_ADR_PGS0, NETXEN_HW_CRB_HUB_AGT_ADR_PGS1, NETXEN_HW_CRB_HUB_AGT_ADR_PGS2, NETXEN_HW_CRB_HUB_AGT_ADR_PGS3, 0, NETXEN_HW_CRB_HUB_AGT_ADR_PGSI, NETXEN_HW_CRB_HUB_AGT_ADR_SN, 0, NETXEN_HW_CRB_HUB_AGT_ADR_EG, 0, NETXEN_HW_CRB_HUB_AGT_ADR_PS, NETXEN_HW_CRB_HUB_AGT_ADR_CAM, 0, 0, 0, 0, 0, NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, 0, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7, NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, 0, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8, NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9, NETXEN_HW_CRB_HUB_AGT_ADR_OCM0, 0, NETXEN_HW_CRB_HUB_AGT_ADR_SMB, NETXEN_HW_CRB_HUB_AGT_ADR_I2C0, NETXEN_HW_CRB_HUB_AGT_ADR_I2C1, 0, NETXEN_HW_CRB_HUB_AGT_ADR_PGNC, 0, }; /* PCI Windowing for DDR regions. 
*/ #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ #define NETXEN_PCIE_SEM_TIMEOUT 10000 static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); int netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) { int done = 0, timeout = 0; while (!done) { done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem))); if (done == 1) break; if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) return -EIO; msleep(1); } if (id_reg) NXWR32(adapter, id_reg, adapter->portnum); return 0; } void netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) { NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); } static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) { if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5); } return 0; } /* Disable an XG interface */ static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) { __u32 mac_cfg; u32 port = adapter->physical_port; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) return 0; if (port >= NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; mac_cfg = 0; if (NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg)) return -EIO; return 0; } #define NETXEN_UNICAST_ADDR(port, index) \ (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) #define NETXEN_MCAST_ADDR(port, index) \ (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8)) #define MAC_HI(addr) \ ((addr[2] << 16) | (addr[1] << 8) | (addr[0])) #define MAC_LO(addr) \ ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) { u32 mac_cfg; u32 cnt = 0; __u32 reg = 0x0200; u32 port = adapter->physical_port; u16 board_type = adapter->ahw.board_type; if (port >= NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); mac_cfg &= ~0x4; NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) || (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ)) reg = (0x20 << port); NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg); mdelay(10); while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20) mdelay(10); if (cnt < 20) { reg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port)); if (mode == NETXEN_NIU_PROMISC_MODE) reg = (reg | 0x2000UL); else reg = (reg & ~0x2000UL); if (mode == NETXEN_NIU_ALLMULTI_MODE) reg = (reg | 0x1000UL); else reg = (reg & ~0x1000UL); NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); } mac_cfg |= 0x4; NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); return 0; } static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) { u32 mac_hi, mac_lo; u32 reg_hi, reg_lo; u8 phy = adapter->physical_port; if (phy >= NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24); mac_hi = addr[2] | ((u32)addr[3] << 8) | ((u32)addr[4] << 16) | ((u32)addr[5] << 24); reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy); reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy); /* write twice to flush */ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) return -EIO; if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) return -EIO; return 0; } static int netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter) { u32 val = 0; u16 port = adapter->physical_port; u8 
*addr = adapter->mac_addr; if (adapter->mc_enabled) return 0; val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); val |= (1UL << (28+port)); NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); /* add broadcast addr to filter */ val = 0xffffff; NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); /* add station addr to filter */ val = MAC_HI(addr); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val); val = MAC_LO(addr); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val); adapter->mc_enabled = 1; return 0; } static int netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter) { u32 val = 0; u16 port = adapter->physical_port; u8 *addr = adapter->mac_addr; if (!adapter->mc_enabled) return 0; val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); val &= ~(1UL << (28+port)); NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); val = MAC_HI(addr); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); val = MAC_LO(addr); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0); NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0); adapter->mc_enabled = 0; return 0; } static int netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, int index, u8 *addr) { u32 hi = 0, lo = 0; u16 port = adapter->physical_port; lo = MAC_LO(addr); hi = MAC_HI(addr); NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi); NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo); return 0; } static void netxen_p2_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); struct netdev_hw_addr *ha; u8 null_addr[ETH_ALEN]; int i; eth_zero_addr(null_addr); if (netdev->flags & IFF_PROMISC) { adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE); /* Full promiscuous mode */ netxen_nic_disable_mcast_filter(adapter); return; } if (netdev_mc_empty(netdev)) { adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE); netxen_nic_disable_mcast_filter(adapter); return; } adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE); if (netdev->flags & IFF_ALLMULTI || netdev_mc_count(netdev) > adapter->max_mc_count) { netxen_nic_disable_mcast_filter(adapter); return; } netxen_nic_enable_mcast_filter(adapter); i = 0; netdev_for_each_mc_addr(ha, netdev) netxen_nic_set_mcast_addr(adapter, i++, ha->addr); /* Clear out remaining addresses */ while (i < adapter->max_mc_count) netxen_nic_set_mcast_addr(adapter, i++, null_addr); } static int netxen_send_cmd_descs(struct netxen_adapter *adapter, struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) { u32 i, producer; struct netxen_cmd_buffer *pbuf; struct nx_host_tx_ring *tx_ring; i = 0; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return -EIO; tx_ring = adapter->tx_ring; __netif_tx_lock_bh(tx_ring->txq); producer = tx_ring->producer; if (nr_desc >= netxen_tx_avail(tx_ring)) { netif_tx_stop_queue(tx_ring->txq); smp_mb(); if (netxen_tx_avail(tx_ring) > nr_desc) { if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) netif_tx_wake_queue(tx_ring->txq); } else { __netif_tx_unlock_bh(tx_ring->txq); return -EBUSY; } } do { pbuf = &tx_ring->cmd_buf_arr[producer]; pbuf->skb = NULL; pbuf->frag_count = 0; memcpy(&tx_ring->desc_head[producer], &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); producer = get_next_index(producer, tx_ring->num_desc); i++; } while (i != nr_desc); tx_ring->producer = producer; netxen_nic_update_cmd_producer(adapter, tx_ring); __netif_tx_unlock_bh(tx_ring->txq); return 0; } static int nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op) { 
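	/*
	 * Build an NX_MAC_EVENT request (op is NETXEN_MAC_ADD or
	 * NETXEN_MAC_DEL for the callers below) and submit it to the
	 * firmware as a single command descriptor on the tx ring.
	 */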
nx_nic_req_t req; nx_mac_req_t *mac_req; u64 word; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23); word = NX_MAC_EVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); mac_req = (nx_mac_req_t *)&req.words[0]; mac_req->op = op; memcpy(mac_req->mac_addr, addr, ETH_ALEN); return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); } static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, const u8 *addr, struct list_head *del_list) { struct list_head *head; nx_mac_list_t *cur; /* look up if already exists */ list_for_each(head, del_list) { cur = list_entry(head, nx_mac_list_t, list); if (ether_addr_equal(addr, cur->mac_addr)) { list_move_tail(head, &adapter->mac_list); return 0; } } cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC); if (cur == NULL) return -ENOMEM; memcpy(cur->mac_addr, addr, ETH_ALEN); list_add_tail(&cur->list, &adapter->mac_list); return nx_p3_sre_macaddr_change(adapter, cur->mac_addr, NETXEN_MAC_ADD); } static void netxen_p3_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); struct netdev_hw_addr *ha; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = VPORT_MISS_MODE_DROP; LIST_HEAD(del_list); struct list_head *head; nx_mac_list_t *cur; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return; list_splice_tail_init(&adapter->mac_list, &del_list); nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list); nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); if (netdev->flags & IFF_PROMISC) { mode = VPORT_MISS_MODE_ACCEPT_ALL; goto send_fw_cmd; } if ((netdev->flags & IFF_ALLMULTI) || (netdev_mc_count(netdev) > adapter->max_mc_count)) { mode = VPORT_MISS_MODE_ACCEPT_MULTI; goto send_fw_cmd; } if (!netdev_mc_empty(netdev)) { netdev_for_each_mc_addr(ha, netdev) nx_p3_nic_add_mac(adapter, ha->addr, &del_list); } send_fw_cmd: adapter->set_promisc(adapter, mode); head = &del_list; while (!list_empty(head)) { cur = list_entry(head->next, nx_mac_list_t, list); nx_p3_sre_macaddr_change(adapter, cur->mac_addr, NETXEN_MAC_DEL); list_del(&cur->list); kfree(cur); } } static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) { nx_nic_req_t req; u64 word; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(mode); return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); } void netxen_p3_free_mac_list(struct netxen_adapter *adapter) { nx_mac_list_t *cur; struct list_head *head = &adapter->mac_list; while (!list_empty(head)) { cur = list_entry(head->next, nx_mac_list_t, list); nx_p3_sre_macaddr_change(adapter, cur->mac_addr, NETXEN_MAC_DEL); list_del(&cur->list); kfree(cur); } } static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) { /* assuming caller has already copied new addr to netdev */ netxen_p3_nic_set_multi(adapter->netdev); return 0; } #define NETXEN_CONFIG_INTR_COALESCE 3 /* * Send the interrupt coalescing parameter set by ethtool to the card. 
*/ int netxen_config_intr_coalesce(struct netxen_adapter *adapter) { nx_nic_req_t req; u64 word[6]; int rv, i; memset(&req, 0, sizeof(nx_nic_req_t)); memset(word, 0, sizeof(word)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word[0]); memcpy(&word[0], &adapter->coal, sizeof(adapter->coal)); for (i = 0; i < 6; i++) req.words[i] = cpu_to_le64(word[i]); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "ERROR. Could not send " "interrupt coalescing parameters\n"); } return rv; } int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable) { nx_nic_req_t req; u64 word; int rv = 0; if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) return 0; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "ERROR. Could not send " "configure hw lro request\n"); } return rv; } int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable) { nx_nic_req_t req; u64 word; int rv = 0; if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable) return rv; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "ERROR. Could not send " "configure bridge mode request\n"); } adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED; return rv; } #define RSS_HASHTYPE_IP_TCP 0x3 int netxen_config_rss(struct netxen_adapter *adapter, int enable) { nx_nic_req_t req; u64 word; int i, rv; static const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 0x255b0ec26d5a56daULL }; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); /* * RSS request: * bits 3-0: hash_method * 5-4: hash_type_ipv4 * 7-6: hash_type_ipv6 * 8: enable * 9: use indirection table * 47-10: reserved * 63-48: indirection table mask */ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | ((u64)(enable & 0x1) << 8) | ((0x7ULL) << 48); req.words[0] = cpu_to_le64(word); for (i = 0; i < ARRAY_SIZE(key); i++) req.words[i+1] = cpu_to_le64(key[i]); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not configure RSS\n", adapter->netdev->name); } return rv; } int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd) { nx_nic_req_t req; u64 word; int rv; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(cmd); memcpy(&req.words[1], &ip, sizeof(u32)); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n", adapter->netdev->name, (cmd == NX_IP_UP) ? 
"Add" : "Remove", ip); } return rv; } int netxen_linkevent_request(struct netxen_adapter *adapter, int enable) { nx_nic_req_t req; u64 word; int rv; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); req.req_hdr = cpu_to_le64(word); req.words[0] = cpu_to_le64(enable | (enable << 8)); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not configure link notification\n", adapter->netdev->name); } return rv; } int netxen_send_lro_cleanup(struct netxen_adapter *adapter) { nx_nic_req_t req; u64 word; int rv; if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) return 0; memset(&req, 0, sizeof(nx_nic_req_t)); req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); word = NX_NIC_H2C_OPCODE_LRO_REQUEST | ((u64)adapter->portnum << 16) | ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56) ; req.req_hdr = cpu_to_le64(word); rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); if (rv != 0) { printk(KERN_ERR "%s: could not cleanup lro flows\n", adapter->netdev->name); } return rv; } /* * netxen_nic_change_mtu - Change the Maximum Transfer Unit * @returns 0 on success, negative on failure */ #define MTU_FUDGE_FACTOR 100 int netxen_nic_change_mtu(struct net_device *netdev, int mtu) { struct netxen_adapter *adapter = netdev_priv(netdev); int rc = 0; if (adapter->set_mtu) rc = adapter->set_mtu(adapter, mtu); if (!rc) netdev->mtu = mtu; return rc; } static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, int size, __le32 * buf) { int i, v, addr; __le32 *ptr32; int ret; addr = base; ptr32 = buf; for (i = 0; i < size / sizeof(u32); i++) { ret = netxen_rom_fast_read(adapter, addr, &v); if (ret) return ret; *ptr32 = cpu_to_le32(v); ptr32++; addr += sizeof(u32); } if ((char *)buf + size > (char *)ptr32) { __le32 local; ret = netxen_rom_fast_read(adapter, addr, &v); if (ret) return ret; local = cpu_to_le32(v); memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); } return 0; } int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac) { __le32 *pmac = (__le32 *) mac; u32 offset; offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) return -1; if (*mac == ~0ULL) { offset = NX_OLD_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) return -1; if (*mac == ~0ULL) return -1; } return 0; } int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac) { uint32_t crbaddr, mac_hi, mac_lo; int pci_func = adapter->ahw.pci_func; crbaddr = CRB_MAC_BLOCK_START + (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); mac_lo = NXRD32(adapter, crbaddr); mac_hi = NXRD32(adapter, crbaddr+4); if (pci_func & 1) *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); else *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); return 0; } /* * Changes the CRB window to the specified window. 
*/ static void netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter, u32 window) { void __iomem *offset; int count = 10; u8 func = adapter->ahw.pci_func; if (adapter->ahw.crb_win == window) return; offset = PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); writel(window, offset); do { if (window == readl(offset)) break; if (printk_ratelimit()) dev_warn(&adapter->pdev->dev, "failed to set CRB window to %d\n", (window == NETXEN_WINDOW_ONE)); udelay(1); } while (--count > 0); if (count > 0) adapter->ahw.crb_win = window; } /* * Returns < 0 if off is not valid, * 1 if window access is needed. 'off' is set to offset from * CRB space in 128M pci map * 0 if no window access is needed. 'off' is set to 2M addr * In: 'off' is offset from base in 128M pci map */ static int netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong off, void __iomem **addr) { crb_128M_2M_sub_block_map_t *m; if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE)) return -EINVAL; off -= NETXEN_PCI_CRBSPACE; /* * Try direct map */ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { *addr = adapter->ahw.pci_base0 + m->start_2M + (off - m->start_128M); return 0; } /* * Not in direct map, use crb window */ *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); return 1; } /* * In: 'off' is offset from CRB space in 128M pci map * Out: 'off' is 2M pci map addr * side effect: lock crb window */ static void netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off) { u32 window; void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; off -= NETXEN_PCI_CRBSPACE; window = CRB_HI(off); writel(window, addr); if (readl(addr) != window) { if (printk_ratelimit()) dev_warn(&adapter->pdev->dev, "failed to set CRB window to %d off 0x%lx\n", window, off); } } static void __iomem * netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter, ulong win_off, void __iomem **mem_ptr) { ulong off = win_off; void __iomem *addr; resource_size_t mem_base; if (ADDR_IN_WINDOW1(win_off)) off = NETXEN_CRB_NORMAL(win_off); addr = pci_base_offset(adapter, off); if (addr) return addr; if (adapter->ahw.pci_len0 == 0) off -= NETXEN_PCI_CRBSPACE; mem_base = pci_resource_start(adapter->pdev, 0); *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE); if (*mem_ptr) addr = *mem_ptr + (off & (PAGE_SIZE - 1)); return addr; } static int netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data) { unsigned long flags; void __iomem *addr, *mem_ptr = NULL; addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); if (!addr) return -EIO; if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ netxen_nic_io_write_128M(adapter, addr, data); } else { /* Window 0 */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); netxen_nic_pci_set_crbwindow_128M(adapter, 0); writel(data, addr); netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); } if (mem_ptr) iounmap(mem_ptr); return 0; } static u32 netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off) { unsigned long flags; void __iomem *addr, *mem_ptr = NULL; u32 data; addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); if (!addr) return -EIO; if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ data = netxen_nic_io_read_128M(adapter, addr); } else { /* Window 0 */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); 
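		/*
		 * Window 0 register: switch the CRB window for the readl()
		 * below and restore window 1 afterwards; holding crb_lock
		 * as a writer keeps other contexts from flipping the
		 * window in between.
		 */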
netxen_nic_pci_set_crbwindow_128M(adapter, 0); data = readl(addr); netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); } if (mem_ptr) iounmap(mem_ptr); return data; } static int netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data) { unsigned long flags; int rv; void __iomem *addr = NULL; rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); if (rv == 0) { writel(data, addr); return 0; } if (rv > 0) { /* indirect access */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); crb_win_lock(adapter); netxen_nic_pci_set_crbwindow_2M(adapter, off); writel(data, addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); return 0; } dev_err(&adapter->pdev->dev, "%s: invalid offset: 0x%016lx\n", __func__, off); dump_stack(); return -EIO; } static u32 netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off) { unsigned long flags; int rv; u32 data; void __iomem *addr = NULL; rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); if (rv == 0) return readl(addr); if (rv > 0) { /* indirect access */ write_lock_irqsave(&adapter->ahw.crb_lock, flags); crb_win_lock(adapter); netxen_nic_pci_set_crbwindow_2M(adapter, off); data = readl(addr); crb_win_unlock(adapter); write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); return data; } dev_err(&adapter->pdev->dev, "%s: invalid offset: 0x%016lx\n", __func__, off); dump_stack(); return -1; } /* window 1 registers only */ static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, void __iomem *addr, u32 data) { read_lock(&adapter->ahw.crb_lock); writel(data, addr); read_unlock(&adapter->ahw.crb_lock); } static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, void __iomem *addr) { u32 val; read_lock(&adapter->ahw.crb_lock); val = readl(addr); read_unlock(&adapter->ahw.crb_lock); return val; } static void netxen_nic_io_write_2M(struct netxen_adapter *adapter, void __iomem *addr, u32 data) { writel(data, addr); } static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter, void __iomem *addr) { return readl(addr); } void __iomem * netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) { void __iomem *addr = NULL; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { if ((offset < NETXEN_CRB_PCIX_HOST2) && (offset > NETXEN_CRB_PCIX_HOST)) addr = PCI_OFFSET_SECOND_RANGE(adapter, offset); else addr = NETXEN_CRB_NORMALIZE(adapter, offset); } else { WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter, offset, &addr)); } return addr; } static int netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter, u64 addr, u32 *start) { if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0); return 0; } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1); return 0; } return -EIO; } static int netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter, u64 addr, u32 *start) { u32 window; window = OCM_WIN(addr); writel(window, adapter->ahw.ocm_win_crb); /* read back to flush */ readl(adapter->ahw.ocm_win_crb); adapter->ahw.ocm_win = window; *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); return 0; } static int netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off, u64 *data, int op) { void __iomem *addr, *mem_ptr = NULL; resource_size_t mem_base; int ret; u32 start; spin_lock(&adapter->ahw.mem_lock); ret = adapter->pci_set_window(adapter, off, &start); if 
(ret != 0) goto unlock; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { addr = adapter->ahw.pci_base0 + start; } else { addr = pci_base_offset(adapter, start); if (addr) goto noremap; mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK); mem_ptr = ioremap(mem_base, PAGE_SIZE); if (mem_ptr == NULL) { ret = -EIO; goto unlock; } addr = mem_ptr + (start & (PAGE_SIZE-1)); } noremap: if (op == 0) /* read */ *data = readq(addr); else /* write */ writeq(*data, addr); unlock: spin_unlock(&adapter->ahw.mem_lock); if (mem_ptr) iounmap(mem_ptr); return ret; } void netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data) { void __iomem *addr = adapter->ahw.pci_base0 + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); spin_lock(&adapter->ahw.mem_lock); *data = readq(addr); spin_unlock(&adapter->ahw.mem_lock); } void netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data) { void __iomem *addr = adapter->ahw.pci_base0 + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); spin_lock(&adapter->ahw.mem_lock); writeq(data, addr); spin_unlock(&adapter->ahw.mem_lock); } #define MAX_CTL_CHECK 1000 static int netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, u64 off, u64 data) { int j, ret; u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P2 has different SIU and MIU test agent base addr */ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P2)) { mem_crb = pci_base_offset(adapter, NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); addr_hi = SIU_TEST_AGT_ADDR_HI; data_lo = SIU_TEST_AGT_WRDATA_LO; data_hi = SIU_TEST_AGT_WRDATA_HI; off_lo = off & SIU_TEST_AGT_ADDR_MASK; off_hi = SIU_TEST_AGT_UPPER_ADDR(off); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); addr_hi = MIU_TEST_AGT_ADDR_HI; data_lo = MIU_TEST_AGT_WRDATA_LO; data_hi = MIU_TEST_AGT_WRDATA_HI; off_lo = off & MIU_TEST_AGT_ADDR_MASK; off_hi = 0; goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { if (adapter->ahw.pci_len0 != 0) { return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1); } } return -EIO; correct: spin_lock(&adapter->ahw.mem_lock); netxen_nic_pci_set_crbwindow_128M(adapter, 0); writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(off_hi, (mem_crb + addr_hi)); writel(data & 0xffffffff, (mem_crb + data_lo)); writel((data >> 32) & 0xffffffff, (mem_crb + data_hi)); writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl((mem_crb + TEST_AGT_CTRL)); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to write through agent\n"); ret = -EIO; } else ret = 0; netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); spin_unlock(&adapter->ahw.mem_lock); return ret; } static int netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, u64 off, u64 *data) { int j, ret; u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; u64 val; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P2 has different SIU and MIU test agent base addr */ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P2)) { mem_crb = 
pci_base_offset(adapter, NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); addr_hi = SIU_TEST_AGT_ADDR_HI; data_lo = SIU_TEST_AGT_RDDATA_LO; data_hi = SIU_TEST_AGT_RDDATA_HI; off_lo = off & SIU_TEST_AGT_ADDR_MASK; off_hi = SIU_TEST_AGT_UPPER_ADDR(off); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); addr_hi = MIU_TEST_AGT_ADDR_HI; data_lo = MIU_TEST_AGT_RDDATA_LO; data_hi = MIU_TEST_AGT_RDDATA_HI; off_lo = off & MIU_TEST_AGT_ADDR_MASK; off_hi = 0; goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { if (adapter->ahw.pci_len0 != 0) { return netxen_nic_pci_mem_access_direct(adapter, off, data, 0); } } return -EIO; correct: spin_lock(&adapter->ahw.mem_lock); netxen_nic_pci_set_crbwindow_128M(adapter, 0); writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(off_hi, (mem_crb + addr_hi)); writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to read through agent\n"); ret = -EIO; } else { temp = readl(mem_crb + data_hi); val = ((u64)temp << 32); val |= readl(mem_crb + data_lo); *data = val; ret = 0; } netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); spin_unlock(&adapter->ahw.mem_lock); return ret; } static int netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, u64 off, u64 data) { int j, ret; u32 temp, off8; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P3 onward, test agent base for MIU and SIU is same */ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1); return -EIO; correct: off8 = off & 0xfffffff8; spin_lock(&adapter->ahw.mem_lock); writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); writel(data & 0xffffffff, mem_crb + MIU_TEST_AGT_WRDATA_LO); writel((data >> 32) & 0xffffffff, mem_crb + MIU_TEST_AGT_WRDATA_HI); writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to write through agent\n"); ret = -EIO; } else ret = 0; spin_unlock(&adapter->ahw.mem_lock); return ret; } static int netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data) { int j, ret; u32 temp, off8; u64 val; void __iomem *mem_crb; /* Only 64-bit aligned access */ if (off & 7) return -EIO; /* P3 onward, test agent base for MIU and SIU is same */ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, 
NETXEN_ADDR_DDR_NET_MAX)) { mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); goto correct; } if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { return netxen_nic_pci_mem_access_direct(adapter, off, data, 0); } return -EIO; correct: off8 = off & 0xfffffff8; spin_lock(&adapter->ahw.mem_lock); writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = readl(mem_crb + TEST_AGT_CTRL); if ((temp & TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&adapter->pdev->dev, "failed to read through agent\n"); ret = -EIO; } else { val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32; val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO); *data = val; ret = 0; } spin_unlock(&adapter->ahw.mem_lock); return ret; } void netxen_setup_hwops(struct netxen_adapter *adapter) { adapter->init_port = netxen_niu_xg_init_port; adapter->stop_port = netxen_niu_disable_xg_port; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { adapter->crb_read = netxen_nic_hw_read_wx_128M, adapter->crb_write = netxen_nic_hw_write_wx_128M, adapter->pci_set_window = netxen_nic_pci_set_window_128M, adapter->pci_mem_read = netxen_nic_pci_mem_read_128M, adapter->pci_mem_write = netxen_nic_pci_mem_write_128M, adapter->io_read = netxen_nic_io_read_128M, adapter->io_write = netxen_nic_io_write_128M, adapter->macaddr_set = netxen_p2_nic_set_mac_addr; adapter->set_multi = netxen_p2_nic_set_multi; adapter->set_mtu = netxen_nic_set_mtu_xgb; adapter->set_promisc = netxen_p2_nic_set_promisc; } else { adapter->crb_read = netxen_nic_hw_read_wx_2M, adapter->crb_write = netxen_nic_hw_write_wx_2M, adapter->pci_set_window = netxen_nic_pci_set_window_2M, adapter->pci_mem_read = netxen_nic_pci_mem_read_2M, adapter->pci_mem_write = netxen_nic_pci_mem_write_2M, adapter->io_read = netxen_nic_io_read_2M, adapter->io_write = netxen_nic_io_write_2M, adapter->set_mtu = nx_fw_cmd_set_mtu; adapter->set_promisc = netxen_p3_nic_set_promisc; adapter->macaddr_set = netxen_p3_nic_set_mac_addr; adapter->set_multi = netxen_p3_nic_set_multi; adapter->phy_read = nx_fw_cmd_query_phy; adapter->phy_write = nx_fw_cmd_set_phy; } } int netxen_nic_get_board_info(struct netxen_adapter *adapter) { int offset, board_type, magic; struct pci_dev *pdev = adapter->pdev; offset = NX_FW_MAGIC_OFFSET; if (netxen_rom_fast_read(adapter, offset, &magic)) return -EIO; if (magic != NETXEN_BDINFO_MAGIC) { dev_err(&pdev->dev, "invalid board config, magic=%08x\n", magic); return -EIO; } offset = NX_BRDTYPE_OFFSET; if (netxen_rom_fast_read(adapter, offset, &board_type)) return -EIO; if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); if ((gpio & 0x8000) == 0) board_type = NETXEN_BRDTYPE_P3_10G_TP; } adapter->ahw.board_type = board_type; switch (board_type) { case NETXEN_BRDTYPE_P2_SB35_4G: adapter->ahw.port_type = NETXEN_NIC_GBE; break; case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: case NETXEN_BRDTYPE_P2_SB31_10G_CX4: case NETXEN_BRDTYPE_P3_HMEZ: case NETXEN_BRDTYPE_P3_XG_LOM: case NETXEN_BRDTYPE_P3_10G_CX4: case NETXEN_BRDTYPE_P3_10G_CX4_LP: case NETXEN_BRDTYPE_P3_IMEZ: case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: case NETXEN_BRDTYPE_P3_10G_SFP_CT: case NETXEN_BRDTYPE_P3_10G_SFP_QT: case NETXEN_BRDTYPE_P3_10G_XFP: case 
NETXEN_BRDTYPE_P3_10000_BASE_T: adapter->ahw.port_type = NETXEN_NIC_XGBE; break; case NETXEN_BRDTYPE_P1_BD: case NETXEN_BRDTYPE_P1_SB: case NETXEN_BRDTYPE_P1_SMAX: case NETXEN_BRDTYPE_P1_SOCK: case NETXEN_BRDTYPE_P3_REF_QG: case NETXEN_BRDTYPE_P3_4_GB: case NETXEN_BRDTYPE_P3_4_GB_MM: adapter->ahw.port_type = NETXEN_NIC_GBE; break; case NETXEN_BRDTYPE_P3_10G_TP: adapter->ahw.port_type = (adapter->portnum < 2) ? NETXEN_NIC_XGBE : NETXEN_NIC_GBE; break; default: dev_err(&pdev->dev, "unknown board type %x\n", board_type); adapter->ahw.port_type = NETXEN_NIC_XGBE; break; } return 0; } /* NIU access sections */ static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) { new_mtu += MTU_FUDGE_FACTOR; if (adapter->physical_port == 0) NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); else NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu); return 0; } void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) { __u32 status; __u32 autoneg; __u32 port_mode; if (!netif_carrier_ok(adapter->netdev)) { adapter->link_speed = 0; adapter->link_duplex = -1; adapter->link_autoneg = AUTONEG_ENABLE; return; } if (adapter->ahw.port_type == NETXEN_NIC_GBE) { port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); if (port_mode == NETXEN_PORT_MODE_802_3_AP) { adapter->link_speed = SPEED_1000; adapter->link_duplex = DUPLEX_FULL; adapter->link_autoneg = AUTONEG_DISABLE; return; } if (adapter->phy_read && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, &status) == 0) { if (netxen_get_phy_link(status)) { switch (netxen_get_phy_speed(status)) { case 0: adapter->link_speed = SPEED_10; break; case 1: adapter->link_speed = SPEED_100; break; case 2: adapter->link_speed = SPEED_1000; break; default: adapter->link_speed = 0; break; } switch (netxen_get_phy_duplex(status)) { case 0: adapter->link_duplex = DUPLEX_HALF; break; case 1: adapter->link_duplex = DUPLEX_FULL; break; default: adapter->link_duplex = -1; break; } if (adapter->phy_read && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, &autoneg) == 0) adapter->link_autoneg = autoneg; } else goto link_down; } else { link_down: adapter->link_speed = 0; adapter->link_duplex = -1; } } } int netxen_nic_wol_supported(struct netxen_adapter *adapter) { u32 wol_cfg; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); if (wol_cfg & (1UL << adapter->portnum)) { wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); if (wol_cfg & (1 << adapter->portnum)) return 1; } return 0; } static u32 netxen_md_cntrl(struct netxen_adapter *adapter, struct netxen_minidump_template_hdr *template_hdr, struct netxen_minidump_entry_crb *crtEntry) { int loop_cnt, i, rv = 0, timeout_flag; u32 op_count, stride; u32 opcode, read_value, addr; unsigned long timeout, timeout_jiffies; addr = crtEntry->addr; op_count = crtEntry->op_count; stride = crtEntry->addr_stride; for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { for (i = 0; i < sizeof(crtEntry->opcode) * 8; i++) { opcode = (crtEntry->opcode & (0x1 << i)); if (opcode) { switch (opcode) { case NX_DUMP_WCRB: NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, crtEntry->value_1); break; case NX_DUMP_RWCRB: NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, read_value); break; case NX_DUMP_ANDCRB: NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); read_value &= crtEntry->value_2; NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, read_value); break; case NX_DUMP_ORCRB: 
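				/* Read-modify-write: OR value_3 into the register. */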
NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); read_value |= crtEntry->value_3; NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, read_value); break; case NX_DUMP_POLLCRB: timeout = crtEntry->poll_timeout; NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); timeout_jiffies = msecs_to_jiffies(timeout) + jiffies; for (timeout_flag = 0; !timeout_flag && ((read_value & crtEntry->value_2) != crtEntry->value_1);) { if (time_after(jiffies, timeout_jiffies)) timeout_flag = 1; NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); } if (timeout_flag) { dev_err(&adapter->pdev->dev, "%s : " "Timeout in poll_crb control operation.\n" , __func__); return -1; } break; case NX_DUMP_RD_SAVE: /* Decide which address to use */ if (crtEntry->state_index_a) addr = template_hdr->saved_state_array [crtEntry->state_index_a]; NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); template_hdr->saved_state_array [crtEntry->state_index_v] = read_value; break; case NX_DUMP_WRT_SAVED: /* Decide which value to use */ if (crtEntry->state_index_v) read_value = template_hdr->saved_state_array [crtEntry->state_index_v]; else read_value = crtEntry->value_1; /* Decide which address to use */ if (crtEntry->state_index_a) addr = template_hdr->saved_state_array [crtEntry->state_index_a]; NX_WR_DUMP_REG(addr, adapter->ahw.pci_base0, read_value); break; case NX_DUMP_MOD_SAVE_ST: read_value = template_hdr->saved_state_array [crtEntry->state_index_v]; read_value <<= crtEntry->shl; read_value >>= crtEntry->shr; if (crtEntry->value_2) read_value &= crtEntry->value_2; read_value |= crtEntry->value_3; read_value += crtEntry->value_1; /* Write value back to state area.*/ template_hdr->saved_state_array [crtEntry->state_index_v] = read_value; break; default: rv = 1; break; } } } addr = addr + stride; } return rv; } /* Read memory or MN */ static u32 netxen_md_rdmem(struct netxen_adapter *adapter, struct netxen_minidump_entry_rdmem *memEntry, u64 *data_buff) { u64 addr, value = 0; int i = 0, loop_cnt; addr = (u64)memEntry->read_addr; loop_cnt = memEntry->read_data_size; /* This is size in bytes */ loop_cnt /= sizeof(value); for (i = 0; i < loop_cnt; i++) { if (netxen_nic_pci_mem_read_2M(adapter, addr, &value)) goto out; *data_buff++ = value; addr += sizeof(value); } out: return i * sizeof(value); } /* Read CRB operation */ static u32 netxen_md_rd_crb(struct netxen_adapter *adapter, struct netxen_minidump_entry_crb *crbEntry, u32 *data_buff) { int loop_cnt; u32 op_count, addr, stride, value; addr = crbEntry->addr; op_count = crbEntry->op_count; stride = crbEntry->addr_stride; for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &value); *data_buff++ = addr; *data_buff++ = value; addr = addr + stride; } return loop_cnt * (2 * sizeof(u32)); } /* Read ROM */ static u32 netxen_md_rdrom(struct netxen_adapter *adapter, struct netxen_minidump_entry_rdrom *romEntry, __le32 *data_buff) { int i, count = 0; u32 size, lck_val; u32 val; u32 fl_addr, waddr, raddr; fl_addr = romEntry->read_addr; size = romEntry->read_data_size/4; lock_try: lck_val = readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_LK)); if (!lck_val && count < MAX_CTL_CHECK) { msleep(20); count++; goto lock_try; } writel(adapter->ahw.pci_func, (void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_LOCK_ID)); for (i = 0; i < size; i++) { waddr = fl_addr & 0xFFFF0000; NX_WR_DUMP_REG(FLASH_ROM_WINDOW, adapter->ahw.pci_base0, waddr); raddr = FLASH_ROM_DATA + (fl_addr & 0x0000FFFF); NX_RD_DUMP_REG(raddr, 
adapter->ahw.pci_base0, &val); *data_buff++ = cpu_to_le32(val); fl_addr += sizeof(val); } readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_ULK)); return romEntry->read_data_size; } /* Handle L2 Cache */ static u32 netxen_md_L2Cache(struct netxen_adapter *adapter, struct netxen_minidump_entry_cache *cacheEntry, u32 *data_buff) { int loop_cnt, i, k, timeout_flag = 0; u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr; u32 tag_value, read_cnt; u8 cntl_value_w, cntl_value_r; unsigned long timeout, timeout_jiffies; loop_cnt = cacheEntry->op_count; read_addr = cacheEntry->read_addr; cntrl_addr = cacheEntry->control_addr; cntl_value_w = (u32) cacheEntry->write_value; tag_reg_addr = cacheEntry->tag_reg_addr; tag_value = cacheEntry->init_tag_value; read_cnt = cacheEntry->read_addr_cnt; for (i = 0; i < loop_cnt; i++) { NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value); if (cntl_value_w) NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, (u32)cntl_value_w); if (cacheEntry->poll_mask) { timeout = cacheEntry->poll_wait; NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, &cntl_value_r); timeout_jiffies = msecs_to_jiffies(timeout) + jiffies; for (timeout_flag = 0; !timeout_flag && ((cntl_value_r & cacheEntry->poll_mask) != 0);) { if (time_after(jiffies, timeout_jiffies)) timeout_flag = 1; NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, &cntl_value_r); } if (timeout_flag) { dev_err(&adapter->pdev->dev, "Timeout in processing L2 Tag poll.\n"); return -1; } } addr = read_addr; for (k = 0; k < read_cnt; k++) { NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); *data_buff++ = read_value; addr += cacheEntry->read_addr_stride; } tag_value += cacheEntry->tag_value_stride; } return read_cnt * loop_cnt * sizeof(read_value); } /* Handle L1 Cache */ static u32 netxen_md_L1Cache(struct netxen_adapter *adapter, struct netxen_minidump_entry_cache *cacheEntry, u32 *data_buff) { int i, k, loop_cnt; u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr; u32 tag_value, read_cnt; u8 cntl_value_w; loop_cnt = cacheEntry->op_count; read_addr = cacheEntry->read_addr; cntrl_addr = cacheEntry->control_addr; cntl_value_w = (u32) cacheEntry->write_value; tag_reg_addr = cacheEntry->tag_reg_addr; tag_value = cacheEntry->init_tag_value; read_cnt = cacheEntry->read_addr_cnt; for (i = 0; i < loop_cnt; i++) { NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value); NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, (u32) cntl_value_w); addr = read_addr; for (k = 0; k < read_cnt; k++) { NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &read_value); *data_buff++ = read_value; addr += cacheEntry->read_addr_stride; } tag_value += cacheEntry->tag_value_stride; } return read_cnt * loop_cnt * sizeof(read_value); } /* Reading OCM memory */ static u32 netxen_md_rdocm(struct netxen_adapter *adapter, struct netxen_minidump_entry_rdocm *ocmEntry, u32 *data_buff) { int i, loop_cnt; u32 value; void __iomem *addr; addr = (ocmEntry->read_addr + adapter->ahw.pci_base0); loop_cnt = ocmEntry->op_count; for (i = 0; i < loop_cnt; i++) { value = readl(addr); *data_buff++ = value; addr += ocmEntry->read_addr_stride; } return i * sizeof(u32); } /* Read MUX data */ static u32 netxen_md_rdmux(struct netxen_adapter *adapter, struct netxen_minidump_entry_mux *muxEntry, u32 *data_buff) { int loop_cnt = 0; u32 read_addr, read_value, select_addr, sel_value; read_addr = muxEntry->read_addr; sel_value = muxEntry->select_value; select_addr = muxEntry->select_addr; for (loop_cnt = 0; loop_cnt < muxEntry->op_count; 
loop_cnt++) { NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, sel_value); NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value); *data_buff++ = sel_value; *data_buff++ = read_value; sel_value += muxEntry->select_value_stride; } return loop_cnt * (2 * sizeof(u32)); } /* Handling Queue State Reads */ static u32 netxen_md_rdqueue(struct netxen_adapter *adapter, struct netxen_minidump_entry_queue *queueEntry, u32 *data_buff) { int loop_cnt, k; u32 queue_id, read_addr, read_value, read_stride, select_addr, read_cnt; read_cnt = queueEntry->read_addr_cnt; read_stride = queueEntry->read_addr_stride; select_addr = queueEntry->select_addr; for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count; loop_cnt++) { NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id); read_addr = queueEntry->read_addr; for (k = 0; k < read_cnt; k++) { NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value); *data_buff++ = read_value; read_addr += read_stride; } queue_id += queueEntry->queue_id_stride; } return loop_cnt * (read_cnt * sizeof(read_value)); } /* * We catch an error where driver does not read * as much data as we expect from the entry. */ static int netxen_md_entry_err_chk(struct netxen_adapter *adapter, struct netxen_minidump_entry *entry, int esize) { if (esize < 0) { entry->hdr.driver_flags |= NX_DUMP_SKIP; return esize; } if (esize != entry->hdr.entry_capture_size) { entry->hdr.entry_capture_size = esize; entry->hdr.driver_flags |= NX_DUMP_SIZE_ERR; dev_info(&adapter->pdev->dev, "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n", entry->hdr.entry_type, entry->hdr.entry_capture_mask, esize, entry->hdr.entry_capture_size); dev_info(&adapter->pdev->dev, "Aborting further dump capture\n"); } return 0; } static int netxen_parse_md_template(struct netxen_adapter *adapter) { int num_of_entries, buff_level, e_cnt, esize; int rv = 0, sane_start = 0, sane_end = 0; char *dbuff; void *template_buff = adapter->mdump.md_template; char *dump_buff = adapter->mdump.md_capture_buff; int capture_mask = adapter->mdump.md_capture_mask; struct netxen_minidump_template_hdr *template_hdr; struct netxen_minidump_entry *entry; if ((capture_mask & 0x3) != 0x3) { dev_err(&adapter->pdev->dev, "Capture mask %02x below minimum needed " "for valid firmware dump\n", capture_mask); return -EINVAL; } template_hdr = (struct netxen_minidump_template_hdr *) template_buff; num_of_entries = template_hdr->num_of_entries; entry = (struct netxen_minidump_entry *) ((char *) template_buff + template_hdr->first_entry_offset); memcpy(dump_buff, template_buff, adapter->mdump.md_template_size); dump_buff = dump_buff + adapter->mdump.md_template_size; if (template_hdr->entry_type == TLHDR) sane_start = 1; for (e_cnt = 0, buff_level = 0; e_cnt < num_of_entries; e_cnt++) { if (!(entry->hdr.entry_capture_mask & capture_mask)) { entry->hdr.driver_flags |= NX_DUMP_SKIP; entry = (struct netxen_minidump_entry *) ((char *) entry + entry->hdr.entry_size); continue; } switch (entry->hdr.entry_type) { case RDNOP: entry->hdr.driver_flags |= NX_DUMP_SKIP; break; case RDEND: entry->hdr.driver_flags |= NX_DUMP_SKIP; sane_end += 1; break; case CNTRL: rv = netxen_md_cntrl(adapter, template_hdr, (void *)entry); if (rv) entry->hdr.driver_flags |= NX_DUMP_SKIP; break; case RDCRB: dbuff = dump_buff + buff_level; esize = netxen_md_rd_crb(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case RDMN: case RDMEM: dbuff = dump_buff + buff_level; 
esize = netxen_md_rdmem(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case BOARD: case RDROM: dbuff = dump_buff + buff_level; esize = netxen_md_rdrom(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case L2ITG: case L2DTG: case L2DAT: case L2INS: dbuff = dump_buff + buff_level; esize = netxen_md_L2Cache(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case L1DAT: case L1INS: dbuff = dump_buff + buff_level; esize = netxen_md_L1Cache(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case RDOCM: dbuff = dump_buff + buff_level; esize = netxen_md_rdocm(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case RDMUX: dbuff = dump_buff + buff_level; esize = netxen_md_rdmux(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; case QUEUE: dbuff = dump_buff + buff_level; esize = netxen_md_rdqueue(adapter, (void *) entry, (void *) dbuff); rv = netxen_md_entry_err_chk (adapter, entry, esize); if (rv < 0) break; buff_level += esize; break; default: entry->hdr.driver_flags |= NX_DUMP_SKIP; break; } /* Next entry in the template */ entry = (struct netxen_minidump_entry *) ((char *) entry + entry->hdr.entry_size); } if (!sane_start || sane_end > 1) { dev_err(&adapter->pdev->dev, "Firmware minidump template configuration error.\n"); } return 0; } static int netxen_collect_minidump(struct netxen_adapter *adapter) { int ret = 0; struct netxen_minidump_template_hdr *hdr; hdr = (struct netxen_minidump_template_hdr *) adapter->mdump.md_template; hdr->driver_capture_mask = adapter->mdump.md_capture_mask; hdr->driver_timestamp = ktime_get_seconds(); hdr->driver_info_word2 = adapter->fw_version; hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION); ret = netxen_parse_md_template(adapter); if (ret) return ret; return ret; } void netxen_dump_fw(struct netxen_adapter *adapter) { struct netxen_minidump_template_hdr *hdr; int i, k, data_size = 0; u32 capture_mask; hdr = (struct netxen_minidump_template_hdr *) adapter->mdump.md_template; capture_mask = adapter->mdump.md_capture_mask; for (i = 0x2, k = 1; (i & NX_DUMP_MASK_MAX); i <<= 1, k++) { if (i & capture_mask) data_size += hdr->capture_size_array[k]; } if (!data_size) { dev_err(&adapter->pdev->dev, "Invalid cap sizes for capture_mask=0x%x\n", adapter->mdump.md_capture_mask); return; } adapter->mdump.md_capture_size = data_size; adapter->mdump.md_dump_size = adapter->mdump.md_template_size + adapter->mdump.md_capture_size; if (!adapter->mdump.md_capture_buff) { adapter->mdump.md_capture_buff = vzalloc(adapter->mdump.md_dump_size); if (!adapter->mdump.md_capture_buff) return; if (netxen_collect_minidump(adapter)) { adapter->mdump.has_valid_dump = 0; adapter->mdump.md_dump_size = 0; vfree(adapter->mdump.md_capture_buff); adapter->mdump.md_capture_buff = NULL; dev_err(&adapter->pdev->dev, "Error in collecting firmware minidump.\n"); } else { adapter->mdump.md_timestamp = jiffies; adapter->mdump.has_valid_dump = 1; adapter->fw_mdump_rdy = 1; dev_info(&adapter->pdev->dev, "%s Successfully " "collected fw dump.\n", 
				adapter->netdev->name);
		}
	} else {
		dev_info(&adapter->pdev->dev,
			 "Cannot overwrite previously collected "
			 "firmware minidump.\n");
		adapter->fw_mdump_rdy = 1;
		return;
	}
}
linux-master
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2003 - 2009 NetXen, Inc. * Copyright (C) 2009 - QLogic Corporation. * All rights reserved. */ #include "netxen_nic_hw.h" #include "netxen_nic.h" #define NXHAL_VERSION 1 static u32 netxen_poll_rsp(struct netxen_adapter *adapter) { u32 rsp = NX_CDRP_RSP_OK; int timeout = 0; do { /* give atleast 1ms for firmware to respond */ msleep(1); if (++timeout > NX_OS_CRB_RETRY_COUNT) return NX_CDRP_RSP_TIMEOUT; rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET); } while (!NX_CDRP_IS_RSP(rsp)); return rsp; } static u32 netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd) { u32 rsp; u32 signature = 0; u32 rcode = NX_RCODE_SUCCESS; signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func, NXHAL_VERSION); /* Acquire semaphore before accessing CRB */ if (netxen_api_lock(adapter)) return NX_RCODE_TIMEOUT; NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature); NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1); NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2); NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3); NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd)); rsp = netxen_poll_rsp(adapter); if (rsp == NX_CDRP_RSP_TIMEOUT) { printk(KERN_ERR "%s: card response timeout.\n", netxen_nic_driver_name); rcode = NX_RCODE_TIMEOUT; } else if (rsp == NX_CDRP_RSP_FAIL) { rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET); printk(KERN_ERR "%s: failed card response code:0x%x\n", netxen_nic_driver_name, rcode); } else if (rsp == NX_CDRP_RSP_OK) { cmd->rsp.cmd = NX_RCODE_SUCCESS; if (cmd->rsp.arg2) cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET); if (cmd->rsp.arg3) cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET); } if (cmd->rsp.arg1) cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET); /* Release semaphore */ netxen_api_unlock(adapter); return rcode; } static int netxen_get_minidump_template_size(struct netxen_adapter *adapter) { struct netxen_cmd_args cmd; memset(&cmd, 0, sizeof(cmd)); cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE; memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd)); netxen_issue_cmd(adapter, &cmd); if (cmd.rsp.cmd != NX_RCODE_SUCCESS) { dev_info(&adapter->pdev->dev, "Can't get template size %d\n", cmd.rsp.cmd); return -EIO; } adapter->mdump.md_template_size = cmd.rsp.arg2; adapter->mdump.md_template_ver = cmd.rsp.arg3; return 0; } static int netxen_get_minidump_template(struct netxen_adapter *adapter) { dma_addr_t md_template_addr; void *addr; u32 size; struct netxen_cmd_args cmd; size = adapter->mdump.md_template_size; if (size == 0) { dev_err(&adapter->pdev->dev, "Can not capture Minidump " "template. 
Invalid template size.\n"); return NX_RCODE_INVALID_ARGS; } addr = dma_alloc_coherent(&adapter->pdev->dev, size, &md_template_addr, GFP_KERNEL); if (!addr) { dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n"); return -ENOMEM; } memset(&cmd, 0, sizeof(cmd)); memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd)); cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR; cmd.req.arg1 = LSD(md_template_addr); cmd.req.arg2 = MSD(md_template_addr); cmd.req.arg3 |= size; netxen_issue_cmd(adapter, &cmd); if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) { memcpy(adapter->mdump.md_template, addr, size); } else { dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n", cmd.rsp.cmd, size, cmd.rsp.arg2); } dma_free_coherent(&adapter->pdev->dev, size, addr, md_template_addr); return 0; } static u32 netxen_check_template_checksum(struct netxen_adapter *adapter) { u64 sum = 0 ; u32 *buff = adapter->mdump.md_template; int count = adapter->mdump.md_template_size/sizeof(uint32_t) ; while (count-- > 0) sum += *buff++ ; while (sum >> 32) sum = (sum & 0xFFFFFFFF) + (sum >> 32) ; return ~sum; } int netxen_setup_minidump(struct netxen_adapter *adapter) { int err = 0, i; u32 *template, *tmp_buf; err = netxen_get_minidump_template_size(adapter); if (err) { adapter->mdump.fw_supports_md = 0; if ((err == NX_RCODE_CMD_INVALID) || (err == NX_RCODE_CMD_NOT_IMPL)) { dev_info(&adapter->pdev->dev, "Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n", NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR, NX_MD_SUPPORT_SUBVERSION); } return err; } if (!adapter->mdump.md_template_size) { dev_err(&adapter->pdev->dev, "Error : Invalid template size " ",should be non-zero.\n"); return -EIO; } adapter->mdump.md_template = kmalloc(adapter->mdump.md_template_size, GFP_KERNEL); if (!adapter->mdump.md_template) return -ENOMEM; err = netxen_get_minidump_template(adapter); if (err) { if (err == NX_RCODE_CMD_NOT_IMPL) adapter->mdump.fw_supports_md = 0; goto free_template; } if (netxen_check_template_checksum(adapter)) { dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n"); err = -EIO; goto free_template; } adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF; tmp_buf = (u32 *) adapter->mdump.md_template; template = (u32 *) adapter->mdump.md_template; for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++) *template++ = __le32_to_cpu(*tmp_buf++); adapter->mdump.md_capture_buff = NULL; adapter->mdump.fw_supports_md = 1; adapter->mdump.md_enabled = 0; return err; free_template: kfree(adapter->mdump.md_template); adapter->mdump.md_template = NULL; return err; } int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) { u32 rcode = NX_RCODE_SUCCESS; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; struct netxen_cmd_args cmd; memset(&cmd, 0, sizeof(cmd)); cmd.req.cmd = NX_CDRP_CMD_SET_MTU; cmd.req.arg1 = recv_ctx->context_id; cmd.req.arg2 = mtu; cmd.req.arg3 = 0; if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) rcode = netxen_issue_cmd(adapter, &cmd); if (rcode != NX_RCODE_SUCCESS) return -EIO; return 0; } int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, u32 speed, u32 duplex, u32 autoneg) { struct netxen_cmd_args cmd; memset(&cmd, 0, sizeof(cmd)); cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT; cmd.req.arg1 = speed; cmd.req.arg2 = duplex; cmd.req.arg3 = autoneg; return netxen_issue_cmd(adapter, &cmd); } static int nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) { void *addr; 
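	/*
	 * Build a host request describing the RDS/SDS rings in DMA-coherent
	 * memory, issue NX_CDRP_CMD_CREATE_RX_CTX, then map the producer,
	 * consumer and interrupt-mask CRB offsets returned by the card.
	 */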
nx_hostrq_rx_ctx_t *prq; nx_cardrsp_rx_ctx_t *prsp; nx_hostrq_rds_ring_t *prq_rds; nx_hostrq_sds_ring_t *prq_sds; nx_cardrsp_rds_ring_t *prsp_rds; nx_cardrsp_sds_ring_t *prsp_sds; struct nx_host_rds_ring *rds_ring; struct nx_host_sds_ring *sds_ring; struct netxen_cmd_args cmd; dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; u64 phys_addr; int i, nrds_rings, nsds_rings; size_t rq_size, rsp_size; u32 cap, reg, val; int err; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; nrds_rings = adapter->max_rds_rings; nsds_rings = adapter->max_sds_rings; rq_size = SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings); rsp_size = SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings); addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, &hostrq_phys_addr, GFP_KERNEL); if (addr == NULL) return -ENOMEM; prq = addr; addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, &cardrsp_phys_addr, GFP_KERNEL); if (addr == NULL) { err = -ENOMEM; goto out_free_rq; } prsp = addr; prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN); cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS); if (adapter->flags & NETXEN_FW_MSS_CAP) cap |= NX_CAP0_HW_LRO_MSS; prq->capabilities[0] = cpu_to_le32(cap); prq->host_int_crb_mode = cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); prq->host_rds_crb_mode = cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE); prq->num_rds_rings = cpu_to_le16(nrds_rings); prq->num_sds_rings = cpu_to_le16(nsds_rings); prq->rds_ring_offset = cpu_to_le32(0); val = le32_to_cpu(prq->rds_ring_offset) + (sizeof(nx_hostrq_rds_ring_t) * nrds_rings); prq->sds_ring_offset = cpu_to_le32(val); prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + le32_to_cpu(prq->rds_ring_offset)); for (i = 0; i < nrds_rings; i++) { rds_ring = &recv_ctx->rds_rings[i]; prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); prq_rds[i].ring_kind = cpu_to_le32(i); prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); } prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + le32_to_cpu(prq->sds_ring_offset)); for (i = 0; i < nsds_rings; i++) { sds_ring = &recv_ctx->sds_rings[i]; prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); prq_sds[i].msi_index = cpu_to_le16(i); } phys_addr = hostrq_phys_addr; memset(&cmd, 0, sizeof(cmd)); cmd.req.arg1 = (u32)(phys_addr >> 32); cmd.req.arg2 = (u32)(phys_addr & 0xffffffff); cmd.req.arg3 = rq_size; cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX; err = netxen_issue_cmd(adapter, &cmd); if (err) { printk(KERN_WARNING "Failed to create rx ctx in firmware%d\n", err); goto out_free_rsp; } prsp_rds = ((nx_cardrsp_rds_ring_t *) &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { rds_ring = &recv_ctx->rds_rings[i]; reg = le32_to_cpu(prsp_rds[i].host_producer_crb); rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter, NETXEN_NIC_REG(reg - 0x200)); } prsp_sds = ((nx_cardrsp_sds_ring_t *) &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { sds_ring = &recv_ctx->sds_rings[i]; reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter, NETXEN_NIC_REG(reg - 0x200)); reg = le32_to_cpu(prsp_sds[i].interrupt_crb); sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter, NETXEN_NIC_REG(reg - 0x200)); } recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); recv_ctx->context_id = 
le16_to_cpu(prsp->context_id); recv_ctx->virt_port = prsp->virt_port; out_free_rsp: dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, cardrsp_phys_addr); out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); return err; } static void nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) { struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; struct netxen_cmd_args cmd; memset(&cmd, 0, sizeof(cmd)); cmd.req.arg1 = recv_ctx->context_id; cmd.req.arg2 = NX_DESTROY_CTX_RESET; cmd.req.arg3 = 0; cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX; if (netxen_issue_cmd(adapter, &cmd)) { printk(KERN_WARNING "%s: Failed to destroy rx ctx in firmware\n", netxen_nic_driver_name); } } static int nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter) { nx_hostrq_tx_ctx_t *prq; nx_hostrq_cds_ring_t *prq_cds; nx_cardrsp_tx_ctx_t *prsp; void *rq_addr, *rsp_addr; size_t rq_size, rsp_size; u32 temp; int err = 0; u64 offset, phys_addr; dma_addr_t rq_phys_addr, rsp_phys_addr; struct nx_host_tx_ring *tx_ring = adapter->tx_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; struct netxen_cmd_args cmd; rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t); rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, &rq_phys_addr, GFP_KERNEL); if (!rq_addr) return -ENOMEM; rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t); rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, &rsp_phys_addr, GFP_KERNEL); if (!rsp_addr) { err = -ENOMEM; goto out_free_rq; } prq = rq_addr; prsp = rsp_addr; prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO); prq->capabilities[0] = cpu_to_le32(temp); prq->host_int_crb_mode = cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); prq->interrupt_ctl = 0; prq->msi_index = 0; prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr); offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx); prq->cmd_cons_dma_addr = cpu_to_le64(offset); prq_cds = &prq->cds_ring; prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); phys_addr = rq_phys_addr; memset(&cmd, 0, sizeof(cmd)); cmd.req.arg1 = (u32)(phys_addr >> 32); cmd.req.arg2 = ((u32)phys_addr & 0xffffffff); cmd.req.arg3 = rq_size; cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX; err = netxen_issue_cmd(adapter, &cmd); if (err == NX_RCODE_SUCCESS) { temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, NETXEN_NIC_REG(temp - 0x200)); #if 0 adapter->tx_state = le32_to_cpu(prsp->host_ctx_state); #endif adapter->tx_context_id = le16_to_cpu(prsp->context_id); } else { printk(KERN_WARNING "Failed to create tx ctx in firmware%d\n", err); err = -EIO; } dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, rsp_phys_addr); out_free_rq: dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); return err; } static void nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter) { struct netxen_cmd_args cmd; memset(&cmd, 0, sizeof(cmd)); cmd.req.arg1 = adapter->tx_context_id; cmd.req.arg2 = NX_DESTROY_CTX_RESET; cmd.req.arg3 = 0; cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX; if (netxen_issue_cmd(adapter, &cmd)) { printk(KERN_WARNING "%s: Failed to destroy tx ctx in firmware\n", netxen_nic_driver_name); } } int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val) { u32 rcode; struct netxen_cmd_args cmd; memset(&cmd, 0, sizeof(cmd)); cmd.req.arg1 = reg; cmd.req.arg2 = 0; cmd.req.arg3 = 0; cmd.req.cmd = 
NX_CDRP_CMD_READ_PHY; cmd.rsp.arg1 = 1; rcode = netxen_issue_cmd(adapter, &cmd); if (rcode != NX_RCODE_SUCCESS) return -EIO; if (val == NULL) return -EIO; *val = cmd.rsp.arg1; return 0; } int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val) { u32 rcode; struct netxen_cmd_args cmd; memset(&cmd, 0, sizeof(cmd)); cmd.req.arg1 = reg; cmd.req.arg2 = val; cmd.req.arg3 = 0; cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY; rcode = netxen_issue_cmd(adapter, &cmd); if (rcode != NX_RCODE_SUCCESS) return -EIO; return 0; } static u64 ctx_addr_sig_regs[][3] = { {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)}, {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)}, {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)}, {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)} }; #define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0]) #define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) #define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) #define lower32(x) ((u32)((x) & 0xffffffff)) #define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff)) static struct netxen_recv_crb recv_crb_registers[] = { /* Instance 0 */ { /* crb_rcv_producer: */ { NETXEN_NIC_REG(0x100), /* Jumbo frames */ NETXEN_NIC_REG(0x110), /* LRO */ NETXEN_NIC_REG(0x120) }, /* crb_sts_consumer: */ { NETXEN_NIC_REG(0x138), NETXEN_NIC_REG_2(0x000), NETXEN_NIC_REG_2(0x004), NETXEN_NIC_REG_2(0x008), }, /* sw_int_mask */ { CRB_SW_INT_MASK_0, NETXEN_NIC_REG_2(0x044), NETXEN_NIC_REG_2(0x048), NETXEN_NIC_REG_2(0x04c), }, }, /* Instance 1 */ { /* crb_rcv_producer: */ { NETXEN_NIC_REG(0x144), /* Jumbo frames */ NETXEN_NIC_REG(0x154), /* LRO */ NETXEN_NIC_REG(0x164) }, /* crb_sts_consumer: */ { NETXEN_NIC_REG(0x17c), NETXEN_NIC_REG_2(0x020), NETXEN_NIC_REG_2(0x024), NETXEN_NIC_REG_2(0x028), }, /* sw_int_mask */ { CRB_SW_INT_MASK_1, NETXEN_NIC_REG_2(0x064), NETXEN_NIC_REG_2(0x068), NETXEN_NIC_REG_2(0x06c), }, }, /* Instance 2 */ { /* crb_rcv_producer: */ { NETXEN_NIC_REG(0x1d8), /* Jumbo frames */ NETXEN_NIC_REG(0x1f8), /* LRO */ NETXEN_NIC_REG(0x208) }, /* crb_sts_consumer: */ { NETXEN_NIC_REG(0x220), NETXEN_NIC_REG_2(0x03c), NETXEN_NIC_REG_2(0x03c), NETXEN_NIC_REG_2(0x03c), }, /* sw_int_mask */ { CRB_SW_INT_MASK_2, NETXEN_NIC_REG_2(0x03c), NETXEN_NIC_REG_2(0x03c), NETXEN_NIC_REG_2(0x03c), }, }, /* Instance 3 */ { /* crb_rcv_producer: */ { NETXEN_NIC_REG(0x22c), /* Jumbo frames */ NETXEN_NIC_REG(0x23c), /* LRO */ NETXEN_NIC_REG(0x24c) }, /* crb_sts_consumer: */ { NETXEN_NIC_REG(0x264), NETXEN_NIC_REG_2(0x03c), NETXEN_NIC_REG_2(0x03c), NETXEN_NIC_REG_2(0x03c), }, /* sw_int_mask */ { CRB_SW_INT_MASK_3, NETXEN_NIC_REG_2(0x03c), NETXEN_NIC_REG_2(0x03c), NETXEN_NIC_REG_2(0x03c), }, }, }; static int netxen_init_old_ctx(struct netxen_adapter *adapter) { struct netxen_recv_context *recv_ctx; struct nx_host_rds_ring *rds_ring; struct nx_host_sds_ring *sds_ring; struct nx_host_tx_ring *tx_ring; int ring; int port = adapter->portnum; struct netxen_ring_ctx *hwctx; u32 signature; tx_ring = adapter->tx_ring; recv_ctx = &adapter->recv_ctx; hwctx = recv_ctx->hwctx; hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr); hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc); for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; hwctx->rcv_rings[ring].addr = cpu_to_le64(rds_ring->phys_addr); hwctx->rcv_rings[ring].size = cpu_to_le32(rds_ring->num_desc); } for (ring = 0; ring < adapter->max_sds_rings; ring++) { 
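		/*
		 * Publish each status ring's address, size and MSI index in
		 * the legacy (P2) hardware context.
		 */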
sds_ring = &recv_ctx->sds_rings[ring]; if (ring == 0) { hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr); hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc); } hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr); hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring); } hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings); signature = (adapter->max_sds_rings > 1) ? NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE; NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port), lower32(recv_ctx->phys_addr)); NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port), upper32(recv_ctx->phys_addr)); NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), signature | port); return 0; } int netxen_alloc_hw_resources(struct netxen_adapter *adapter) { void *addr; int err = 0; int ring; struct netxen_recv_context *recv_ctx; struct nx_host_rds_ring *rds_ring; struct nx_host_sds_ring *sds_ring; struct nx_host_tx_ring *tx_ring; struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; int port = adapter->portnum; recv_ctx = &adapter->recv_ctx; tx_ring = adapter->tx_ring; addr = dma_alloc_coherent(&pdev->dev, sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), &recv_ctx->phys_addr, GFP_KERNEL); if (addr == NULL) { dev_err(&pdev->dev, "failed to allocate hw context\n"); return -ENOMEM; } recv_ctx->hwctx = addr; recv_ctx->hwctx->ctx_id = cpu_to_le32(port); recv_ctx->hwctx->cmd_consumer_offset = cpu_to_le64(recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx)); tx_ring->hw_consumer = (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); /* cmd desc ring */ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), &tx_ring->phys_addr, GFP_KERNEL); if (addr == NULL) { dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n", netdev->name); err = -ENOMEM; goto err_out_free; } tx_ring->desc_head = addr; for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; addr = dma_alloc_coherent(&adapter->pdev->dev, RCV_DESC_RINGSIZE(rds_ring), &rds_ring->phys_addr, GFP_KERNEL); if (addr == NULL) { dev_err(&pdev->dev, "%s: failed to allocate rds ring [%d]\n", netdev->name, ring); err = -ENOMEM; goto err_out_free; } rds_ring->desc_head = addr; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter, recv_crb_registers[port].crb_rcv_producer[ring]); } for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; addr = dma_alloc_coherent(&adapter->pdev->dev, STATUS_DESC_RINGSIZE(sds_ring), &sds_ring->phys_addr, GFP_KERNEL); if (addr == NULL) { dev_err(&pdev->dev, "%s: failed to allocate sds ring [%d]\n", netdev->name, ring); err = -ENOMEM; goto err_out_free; } sds_ring->desc_head = addr; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter, recv_crb_registers[port].crb_sts_consumer[ring]); sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter, recv_crb_registers[port].sw_int_mask[ring]); } } if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) goto done; err = nx_fw_cmd_create_rx_ctx(adapter); if (err) goto err_out_free; err = nx_fw_cmd_create_tx_ctx(adapter); if (err) goto err_out_free; } else { err = netxen_init_old_ctx(adapter); if (err) goto err_out_free; } done: return 0; err_out_free: netxen_free_hw_resources(adapter); return err; } void netxen_free_hw_resources(struct netxen_adapter *adapter) { struct 
netxen_recv_context *recv_ctx; struct nx_host_rds_ring *rds_ring; struct nx_host_sds_ring *sds_ring; struct nx_host_tx_ring *tx_ring; int ring; int port = adapter->portnum; if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state)) goto done; nx_fw_cmd_destroy_rx_ctx(adapter); nx_fw_cmd_destroy_tx_ctx(adapter); } else { netxen_api_lock(adapter); NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), NETXEN_CTX_D3_RESET | port); netxen_api_unlock(adapter); } /* Allow dma queues to drain after context reset */ msleep(20); done: recv_ctx = &adapter->recv_ctx; if (recv_ctx->hwctx != NULL) { dma_free_coherent(&adapter->pdev->dev, sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), recv_ctx->hwctx, recv_ctx->phys_addr); recv_ctx->hwctx = NULL; } tx_ring = adapter->tx_ring; if (tx_ring->desc_head != NULL) { dma_free_coherent(&adapter->pdev->dev, TX_DESC_RINGSIZE(tx_ring), tx_ring->desc_head, tx_ring->phys_addr); tx_ring->desc_head = NULL; } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &recv_ctx->rds_rings[ring]; if (rds_ring->desc_head != NULL) { dma_free_coherent(&adapter->pdev->dev, RCV_DESC_RINGSIZE(rds_ring), rds_ring->desc_head, rds_ring->phys_addr); rds_ring->desc_head = NULL; } } for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; if (sds_ring->desc_head != NULL) { dma_free_coherent(&adapter->pdev->dev, STATUS_DESC_RINGSIZE(sds_ring), sds_ring->desc_head, sds_ring->phys_addr); sds_ring->desc_head = NULL; } } }
linux-master
drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/dma-mapping.h> #include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/workqueue.h> #include <net/ipv6.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/io.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/qed/qed_ll2_if.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" #include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_rdma.h" #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered) #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered) #define QED_LL2_TX_SIZE (256) #define QED_LL2_RX_SIZE (4096) #define QED_LL2_INVALID_STATS_ID 0xff struct qed_cb_ll2_info { int rx_cnt; u32 rx_size; u8 handle; /* Lock protecting LL2 buffer lists in sleepless context */ spinlock_t lock; struct list_head list; const struct qed_ll2_cb_ops *cbs; void *cb_cookie; }; struct qed_ll2_buffer { struct list_head list; void *data; dma_addr_t phys_addr; }; static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn, u8 ll2_queue_type, u8 qid) { u8 stats_id; /* For legacy (RAM based) queues, the stats_id will be set as the * queue_id. Otherwise (context based queue), it will be set to * the "abs_pf_id" offset from the end of the RAM based queue IDs. * If the final value exceeds the total counters amount, return * INVALID value to indicate that the stats for this connection should * be disabled. 
*/ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) stats_id = qid; else stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id; if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS) return stats_id; else return QED_LL2_INVALID_STATS_ID; } static void qed_ll2b_complete_tx_packet(void *cxt, u8 connection_handle, void *cookie, dma_addr_t first_frag_addr, bool b_last_fragment, bool b_last_packet) { struct qed_hwfn *p_hwfn = cxt; struct qed_dev *cdev = p_hwfn->cdev; struct sk_buff *skb = cookie; /* All we need to do is release the mapping */ dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr, skb_headlen(skb), DMA_TO_DEVICE); if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb) cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb, b_last_fragment); dev_kfree_skb_any(skb); } static int qed_ll2_alloc_buffer(struct qed_dev *cdev, u8 **data, dma_addr_t *phys_addr) { *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC); if (!(*data)) { DP_INFO(cdev, "Failed to allocate LL2 buffer data\n"); return -ENOMEM; } *phys_addr = dma_map_single(&cdev->pdev->dev, ((*data) + NET_SKB_PAD), cdev->ll2->rx_size, DMA_FROM_DEVICE); if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) { DP_INFO(cdev, "Failed to map LL2 buffer data\n"); kfree((*data)); return -ENOMEM; } return 0; } static int qed_ll2_dealloc_buffer(struct qed_dev *cdev, struct qed_ll2_buffer *buffer) { spin_lock_bh(&cdev->ll2->lock); dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, cdev->ll2->rx_size, DMA_FROM_DEVICE); kfree(buffer->data); list_del(&buffer->list); cdev->ll2->rx_cnt--; if (!cdev->ll2->rx_cnt) DP_INFO(cdev, "All LL2 entries were removed\n"); spin_unlock_bh(&cdev->ll2->lock); return 0; } static void qed_ll2_kill_buffers(struct qed_dev *cdev) { struct qed_ll2_buffer *buffer, *tmp_buffer; list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) qed_ll2_dealloc_buffer(cdev, buffer); } static void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) { struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_buffer *buffer = data->cookie; struct qed_dev *cdev = p_hwfn->cdev; dma_addr_t new_phys_addr; struct sk_buff *skb; bool reuse = false; int rc = -EINVAL; u8 *new_data; DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA), "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n", (u64)data->rx_buf_addr, data->u.placement_offset, data->length.packet_length, data->parse_flags, data->vlan, data->opaque_data_0, data->opaque_data_1); if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, buffer->data, data->length.packet_length, false); } /* Determine if data is valid */ if (data->length.packet_length < ETH_HLEN) reuse = true; /* Allocate a replacement for buffer; Reuse upon failure */ if (!reuse) rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data, &new_phys_addr); /* If need to reuse or there's no replacement buffer, repost this */ if (rc) goto out_post; dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, cdev->ll2->rx_size, DMA_FROM_DEVICE); skb = slab_build_skb(buffer->data); if (!skb) { DP_INFO(cdev, "Failed to build SKB\n"); kfree(buffer->data); goto out_post1; } data->u.placement_offset += NET_SKB_PAD; skb_reserve(skb, data->u.placement_offset); skb_put(skb, data->length.packet_length); skb_checksum_none_assert(skb); /* Get parital ethernet information instead of eth_type_trans(), * Since we don't have an associated net_device. 
*/ skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; /* Pass SKB onward */ if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) { if (data->vlan) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), data->vlan); cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, data->opaque_data_0, data->opaque_data_1); } else { DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA | QED_MSG_LL2 | QED_MSG_STORAGE), "Dropping the packet\n"); kfree(buffer->data); } out_post1: /* Update Buffer information and update FW producer */ buffer->data = new_data; buffer->phys_addr = new_phys_addr; out_post: rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle, buffer->phys_addr, 0, buffer, 1); if (rc) qed_ll2_dealloc_buffer(cdev, buffer); } static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, u8 connection_handle, bool b_lock, bool b_only_active) { struct qed_ll2_info *p_ll2_conn, *p_ret = NULL; if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) return NULL; if (!p_hwfn->p_ll2_info) return NULL; p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; if (b_only_active) { if (b_lock) mutex_lock(&p_ll2_conn->mutex); if (p_ll2_conn->b_active) p_ret = p_ll2_conn; if (b_lock) mutex_unlock(&p_ll2_conn->mutex); } else { p_ret = p_ll2_conn; } return p_ret; } static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, u8 connection_handle) { return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true); } static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn, u8 connection_handle) { return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true); } static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn *p_hwfn, u8 connection_handle) { return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false); } static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) { bool b_last_packet = false, b_last_frag = false; struct qed_ll2_tx_packet *p_pkt = NULL; struct qed_ll2_info *p_ll2_conn; struct qed_ll2_tx_queue *p_tx; unsigned long flags = 0; dma_addr_t tx_frag; p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); if (!p_ll2_conn) return; p_tx = &p_ll2_conn->tx_queue; spin_lock_irqsave(&p_tx->lock, flags); while (!list_empty(&p_tx->active_descq)) { p_pkt = list_first_entry(&p_tx->active_descq, struct qed_ll2_tx_packet, list_entry); if (!p_pkt) break; list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); spin_unlock_irqrestore(&p_tx->lock, flags); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer); } else { p_tx->cur_completing_packet = *p_pkt; p_tx->cur_completing_bd_idx = 1; b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; tx_frag = p_pkt->bds_set[0].tx_frag; p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie, p_ll2_conn->my_id, p_pkt->cookie, tx_frag, b_last_frag, b_last_packet); } spin_lock_irqsave(&p_tx->lock, flags); } spin_unlock_irqrestore(&p_tx->lock, flags); } static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) { struct qed_ll2_info *p_ll2_conn = p_cookie; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0; struct qed_ll2_tx_packet *p_pkt; bool b_last_frag = false; unsigned long flags; int rc = -EINVAL; if (!p_ll2_conn) return rc; 
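	/*
	 * Consume completed BDs up to the firmware consumer index, releasing
	 * each packet through tx_comp_cb with the queue lock dropped.
	 */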
spin_lock_irqsave(&p_tx->lock, flags); if (p_tx->b_completing_packet) { rc = -EBUSY; goto out; } new_idx = le16_to_cpu(*p_tx->p_fw_cons); num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); while (num_bds) { if (list_empty(&p_tx->active_descq)) goto out; p_pkt = list_first_entry(&p_tx->active_descq, struct qed_ll2_tx_packet, list_entry); if (!p_pkt) goto out; p_tx->b_completing_packet = true; p_tx->cur_completing_packet = *p_pkt; num_bds_in_packet = p_pkt->bd_used; list_del(&p_pkt->list_entry); if (unlikely(num_bds < num_bds_in_packet)) { DP_NOTICE(p_hwfn, "Rest of BDs does not cover whole packet\n"); goto out; } num_bds -= num_bds_in_packet; p_tx->bds_idx += num_bds_in_packet; while (num_bds_in_packet--) qed_chain_consume(&p_tx->txq_chain); p_tx->cur_completing_bd_idx = 1; b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); spin_unlock_irqrestore(&p_tx->lock, flags); p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie, p_ll2_conn->my_id, p_pkt->cookie, p_pkt->bds_set[0].tx_frag, b_last_frag, !num_bds); spin_lock_irqsave(&p_tx->lock, flags); } p_tx->b_completing_packet = false; rc = 0; out: spin_unlock_irqrestore(&p_tx->lock, flags); return rc; } static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn, union core_rx_cqe_union *p_cqe, struct qed_ll2_comp_rx_data *data) { data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags); data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length); data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan); data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error; data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id); data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp); } static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, union core_rx_cqe_union *p_cqe, struct qed_ll2_comp_rx_data *data) { data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags); data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags); data->length.packet_length = le16_to_cpu(p_cqe->rx_cqe_fp.packet_length); data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan); data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]); data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]); data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset; } static int qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, union core_rx_cqe_union *p_cqe, unsigned long *p_lock_flags) { struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct core_rx_slow_path_cqe *sp_cqe; sp_cqe = &p_cqe->rx_cqe_sp; if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) { DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n", sp_cqe->ramrod_cmd_id); return -EINVAL; } if (!p_ll2_conn->cbs.slowpath_cb) { DP_NOTICE(p_hwfn, "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n"); return -EINVAL; } spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie, p_ll2_conn->my_id, le32_to_cpu(sp_cqe->opaque_data.data[0]), le32_to_cpu(sp_cqe->opaque_data.data[1])); spin_lock_irqsave(&p_rx->lock, *p_lock_flags); return 0; } static int qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, union core_rx_cqe_union *p_cqe, unsigned long *p_lock_flags, bool b_last_cqe) { struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct 
qed_ll2_rx_packet *p_pkt = NULL; struct qed_ll2_comp_rx_data data; if (!list_empty(&p_rx->active_descq)) p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); if (unlikely(!p_pkt)) { DP_NOTICE(p_hwfn, "[%d] LL2 Rx completion but active_descq is empty\n", p_ll2_conn->input.conn_type); return -EIO; } list_del(&p_pkt->list_entry); if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR) qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data); else qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data); if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)) DP_NOTICE(p_hwfn, "Mismatch between active_descq and the LL2 Rx chain\n"); list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); data.connection_handle = p_ll2_conn->my_id; data.cookie = p_pkt->cookie; data.rx_buf_addr = p_pkt->rx_buf_addr; data.b_last_packet = b_last_cqe; spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data); spin_lock_irqsave(&p_rx->lock, *p_lock_flags); return 0; } static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) { struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; union core_rx_cqe_union *cqe = NULL; u16 cq_new_idx = 0, cq_old_idx = 0; unsigned long flags = 0; int rc = 0; if (!p_ll2_conn) return rc; spin_lock_irqsave(&p_rx->lock, flags); if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) { spin_unlock_irqrestore(&p_rx->lock, flags); return 0; } cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); while (cq_new_idx != cq_old_idx) { bool b_last_cqe = (cq_new_idx == cq_old_idx); cqe = (union core_rx_cqe_union *) qed_chain_consume(&p_rx->rcq_chain); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); DP_VERBOSE(p_hwfn, QED_MSG_LL2, "LL2 [sw. cons %04x, fw. 
at %04x] - Got Packet of type %02x\n", cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type); switch (cqe->rx_cqe_sp.type) { case CORE_RX_CQE_TYPE_SLOW_PATH: rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn, cqe, &flags); break; case CORE_RX_CQE_TYPE_GSI_OFFLOAD: case CORE_RX_CQE_TYPE_REGULAR: rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn, cqe, &flags, b_last_cqe); break; default: rc = -EIO; } } spin_unlock_irqrestore(&p_rx->lock, flags); return rc; } static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) { struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_rx_packet *p_pkt = NULL; struct qed_ll2_rx_queue *p_rx; unsigned long flags = 0; p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); if (!p_ll2_conn) return; p_rx = &p_ll2_conn->rx_queue; spin_lock_irqsave(&p_rx->lock, flags); while (!list_empty(&p_rx->active_descq)) { p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); if (!p_pkt) break; list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); spin_unlock_irqrestore(&p_rx->lock, flags); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer); } else { dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr; void *cookie = p_pkt->cookie; bool b_last; b_last = list_empty(&p_rx->active_descq); p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie, p_ll2_conn->my_id, cookie, rx_buf_addr, b_last); } spin_lock_irqsave(&p_rx->lock, flags); } spin_unlock_irqrestore(&p_rx->lock, flags); } static bool qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, struct core_rx_slow_path_cqe *p_cqe) { struct ooo_opaque *ooo_opq; u32 cid; if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) return false; ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data; if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES) return false; /* Need to make a flush */ cid = le32_to_cpu(ooo_opq->cid); qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid); return true; } static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; u16 packet_length = 0, parse_flags = 0, vlan = 0; struct qed_ll2_rx_packet *p_pkt = NULL; union core_rx_cqe_union *cqe = NULL; u16 cq_new_idx = 0, cq_old_idx = 0; struct qed_ooo_buffer *p_buffer; struct ooo_opaque *ooo_opq; u8 placement_offset = 0; u8 cqe_type; u32 cid; cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); if (cq_new_idx == cq_old_idx) return 0; while (cq_new_idx != cq_old_idx) { struct core_rx_fast_path_cqe *p_cqe_fp; cqe = qed_chain_consume(&p_rx->rcq_chain); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); cqe_type = cqe->rx_cqe_sp.type; if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH) if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn, &cqe->rx_cqe_sp)) continue; if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) { DP_NOTICE(p_hwfn, "Got a non-regular LB LL2 completion [type 0x%02x]\n", cqe_type); return -EINVAL; } p_cqe_fp = &cqe->rx_cqe_fp; placement_offset = p_cqe_fp->placement_offset; parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags); packet_length = le16_to_cpu(p_cqe_fp->packet_length); vlan = le16_to_cpu(p_cqe_fp->vlan); ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data; qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq); cid = le32_to_cpu(ooo_opq->cid); /* Process delete isle first */ if (ooo_opq->drop_size) 
qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid, ooo_opq->drop_isle, ooo_opq->drop_size); if (ooo_opq->ooo_opcode == TCP_EVENT_NOP) continue; /* Now process create/add/join isles */ if (unlikely(list_empty(&p_rx->active_descq))) { DP_NOTICE(p_hwfn, "LL2 OOO RX chain has no submitted buffers\n" ); return -EIO; } p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE || ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT || ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT || ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN || ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) { if (unlikely(!p_pkt)) { DP_NOTICE(p_hwfn, "LL2 OOO RX packet is not valid\n"); return -EIO; } list_del(&p_pkt->list_entry); p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; p_buffer->packet_length = packet_length; p_buffer->parse_flags = parse_flags; p_buffer->vlan = vlan; p_buffer->placement_offset = placement_offset; qed_chain_consume(&p_rx->rxq_chain); list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); switch (ooo_opq->ooo_opcode) { case TCP_EVENT_ADD_NEW_ISLE: qed_ooo_add_new_isle(p_hwfn, p_hwfn->p_ooo_info, cid, ooo_opq->ooo_isle, p_buffer); break; case TCP_EVENT_ADD_ISLE_RIGHT: qed_ooo_add_new_buffer(p_hwfn, p_hwfn->p_ooo_info, cid, ooo_opq->ooo_isle, p_buffer, QED_OOO_RIGHT_BUF); break; case TCP_EVENT_ADD_ISLE_LEFT: qed_ooo_add_new_buffer(p_hwfn, p_hwfn->p_ooo_info, cid, ooo_opq->ooo_isle, p_buffer, QED_OOO_LEFT_BUF); break; case TCP_EVENT_JOIN: qed_ooo_add_new_buffer(p_hwfn, p_hwfn->p_ooo_info, cid, ooo_opq->ooo_isle + 1, p_buffer, QED_OOO_LEFT_BUF); qed_ooo_join_isles(p_hwfn, p_hwfn->p_ooo_info, cid, ooo_opq->ooo_isle); break; case TCP_EVENT_ADD_PEN: qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer, true); break; } } else { DP_NOTICE(p_hwfn, "Unexpected event (%d) TX OOO completion\n", ooo_opq->ooo_opcode); } } return 0; } static void qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { struct qed_ll2_tx_pkt_info tx_pkt; struct qed_ooo_buffer *p_buffer; u16 l4_hdr_offset_w; dma_addr_t first_frag; u8 bd_flags; int rc; /* Submit Tx buffers here */ while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, p_hwfn->p_ooo_info))) { l4_hdr_offset_w = 0; bd_flags = 0; first_frag = p_buffer->rx_buffer_phys_addr + p_buffer->placement_offset; SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); memset(&tx_pkt, 0, sizeof(tx_pkt)); tx_pkt.num_of_bds = 1; tx_pkt.vlan = p_buffer->vlan; tx_pkt.bd_flags = bd_flags; tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; switch (p_ll2_conn->tx_dest) { case CORE_TX_DEST_NW: tx_pkt.tx_dest = QED_LL2_TX_DEST_NW; break; case CORE_TX_DEST_LB: tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; break; case CORE_TX_DEST_DROP: default: tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; break; } tx_pkt.first_frag = first_frag; tx_pkt.first_frag_len = p_buffer->packet_length; tx_pkt.cookie = p_buffer; rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, &tx_pkt, true); if (rc) { qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer, false); break; } } } static void qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { struct qed_ooo_buffer *p_buffer; int rc; while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, p_hwfn->p_ooo_info))) { rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id, p_buffer->rx_buffer_phys_addr, 0, p_buffer, true); if (rc) { qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer); break; } 
} } static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) { struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; int rc; if (!p_ll2_conn) return 0; if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) return 0; rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); if (rc) return rc; qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); return 0; } static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) { struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_ll2_tx_packet *p_pkt = NULL; struct qed_ooo_buffer *p_buffer; bool b_dont_submit_rx = false; u16 new_idx = 0, num_bds = 0; int rc; if (unlikely(!p_ll2_conn)) return 0; if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn))) return 0; new_idx = le16_to_cpu(*p_tx->p_fw_cons); num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); if (unlikely(!num_bds)) return 0; while (num_bds) { if (list_empty(&p_tx->active_descq)) return -EINVAL; p_pkt = list_first_entry(&p_tx->active_descq, struct qed_ll2_tx_packet, list_entry); if (unlikely(!p_pkt)) return -EINVAL; if (unlikely(p_pkt->bd_used != 1)) { DP_NOTICE(p_hwfn, "Unexpectedly many BDs(%d) in TX OOO completion\n", p_pkt->bd_used); return -EINVAL; } list_del(&p_pkt->list_entry); num_bds--; p_tx->bds_idx++; qed_chain_consume(&p_tx->txq_chain); p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); if (b_dont_submit_rx) { qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer); continue; } rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id, p_buffer->rx_buffer_phys_addr, 0, p_buffer, true); if (rc != 0) { qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer); b_dont_submit_rx = true; } } qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); return 0; } static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn) { u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2), "Stopping LL2 OOO queue [%02x]\n", *handle); qed_ll2_terminate_connection(p_hwfn, *handle); qed_ll2_release_connection(p_hwfn, *handle); *handle = QED_LL2_UNUSED_HANDLE; } static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, u8 action_on_error) { enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct core_rx_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u16 cqe_pbl_size; int rc = 0; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_ll2_conn->cid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, CORE_RAMROD_RX_QUEUE_START, PROTOCOLID_CORE, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.core_rx_queue_start; memset(p_ramrod, 0, sizeof(*p_ramrod)); p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_index = p_rx->rx_sb_index; p_ramrod->complete_event_flg = 1; p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr); cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, qed_chain_get_pbl_phys(&p_rx->rcq_chain)); p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; p_ramrod->inner_vlan_stripping_en 
= p_ll2_conn->input.rx_vlan_removal_en; if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) p_ramrod->report_outer_vlan = 1; p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) && p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE && conn_type != QED_LL2_TYPE_IWARP && (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) { p_ramrod->mf_si_bcast_accept_all = 1; p_ramrod->mf_si_mcast_accept_all = 1; } else { p_ramrod->mf_si_bcast_accept_all = 0; p_ramrod->mf_si_mcast_accept_all = 0; } p_ramrod->action_on_error.error_type = action_on_error; p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; p_ramrod->zero_prod_flg = 1; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct core_tx_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u16 pq_id = 0, pbl_size; int rc = -EINVAL; if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) return 0; if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)) p_ll2_conn->tx_stats_en = 0; else p_ll2_conn->tx_stats_en = 1; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_ll2_conn->cid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, CORE_RAMROD_TX_QUEUE_START, PROTOCOLID_CORE, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.core_tx_queue_start; p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_index = p_tx->tx_sb_index; p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); p_ramrod->stats_en = p_ll2_conn->tx_stats_en; p_ramrod->stats_id = p_ll2_conn->tx_stats_id; DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, qed_chain_get_pbl_phys(&p_tx->txq_chain)); pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain); p_ramrod->pbl_size = cpu_to_le16(pbl_size); switch (p_ll2_conn->input.tx_tc) { case PURE_LB_TC: pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); break; case PKT_LB_TC: pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO); break; default: pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); break; } p_ramrod->qm_pq_id = cpu_to_le16(pq_id); switch (conn_type) { case QED_LL2_TYPE_FCOE: p_ramrod->conn_type = PROTOCOLID_FCOE; break; case QED_LL2_TYPE_TCP_ULP: p_ramrod->conn_type = PROTOCOLID_TCP_ULP; break; case QED_LL2_TYPE_ROCE: p_ramrod->conn_type = PROTOCOLID_ROCE; break; case QED_LL2_TYPE_IWARP: p_ramrod->conn_type = PROTOCOLID_IWARP; break; case QED_LL2_TYPE_OOO: if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || p_hwfn->hw_info.personality == QED_PCI_NVMETCP) p_ramrod->conn_type = PROTOCOLID_TCP_ULP; else p_ramrod->conn_type = PROTOCOLID_IWARP; break; default: p_ramrod->conn_type = PROTOCOLID_ETH; DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); } p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) return rc; rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg, DB_REC_WIDTH_32B, DB_REC_KERNEL); return rc; } static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { struct core_rx_stop_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data 
init_data; int rc = -EINVAL; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_ll2_conn->cid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, CORE_RAMROD_RX_QUEUE_STOP, PROTOCOLID_CORE, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.core_rx_queue_stop; p_ramrod->complete_event_flg = 1; p_ramrod->queue_id = p_ll2_conn->queue_id; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_ll2_conn->cid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, CORE_RAMROD_TX_QUEUE_STOP, PROTOCOLID_CORE, &init_data); if (rc) return rc; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_info) { struct qed_chain_init_params params = { .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, .cnt_type = QED_CHAIN_CNT_TYPE_U16, .num_elems = p_ll2_info->input.rx_num_desc, }; struct qed_dev *cdev = p_hwfn->cdev; struct qed_ll2_rx_packet *p_descq; u32 capacity; int rc = 0; if (!p_ll2_info->input.rx_num_desc) goto out; params.mode = QED_CHAIN_MODE_NEXT_PTR; params.elem_size = sizeof(struct core_rx_bd); rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n"); goto out; } capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain); p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet), GFP_KERNEL); if (!p_descq) { rc = -ENOMEM; DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n"); goto out; } p_ll2_info->rx_queue.descq_array = p_descq; params.mode = QED_CHAIN_MODE_PBL; params.elem_size = sizeof(struct core_rx_fast_path_cqe); rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, &params); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n"); goto out; } DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc); out: return rc; } static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_info) { struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, .cnt_type = QED_CHAIN_CNT_TYPE_U16, .num_elems = p_ll2_info->input.tx_num_desc, .elem_size = sizeof(struct core_tx_bd), }; struct qed_ll2_tx_packet *p_descq; size_t desc_size; u32 capacity; int rc = 0; if (!p_ll2_info->input.tx_num_desc) goto out; rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain, &params); if (rc) goto out; capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain); /* All bds_set elements are flexibily added. 
*/ desc_size = struct_size(p_descq, bds_set, p_ll2_info->input.tx_max_bds_per_packet); p_descq = kcalloc(capacity, desc_size, GFP_KERNEL); if (!p_descq) { rc = -ENOMEM; goto out; } p_ll2_info->tx_queue.descq_mem = p_descq; DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc); out: if (rc) DP_NOTICE(p_hwfn, "Can't allocate memory for Tx LL2 with 0x%08x buffers\n", p_ll2_info->input.tx_num_desc); return rc; } static int qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_info, u16 mtu) { struct qed_ooo_buffer *p_buf = NULL; void *p_virt; u16 buf_idx; int rc = 0; if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO) return rc; /* Correct number of requested OOO buffers if needed */ if (!p_ll2_info->input.rx_num_ooo_buffers) { u16 num_desc = p_ll2_info->input.rx_num_desc; if (!num_desc) return -EINVAL; p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2; } for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers; buf_idx++) { p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); if (!p_buf) { rc = -ENOMEM; goto out; } p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; p_buf->rx_buffer_size = (p_buf->rx_buffer_size + ETH_CACHE_LINE_SIZE - 1) & ~(ETH_CACHE_LINE_SIZE - 1); p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, p_buf->rx_buffer_size, &p_buf->rx_buffer_phys_addr, GFP_KERNEL); if (!p_virt) { kfree(p_buf); rc = -ENOMEM; goto out; } p_buf->rx_buffer_virt_addr = p_virt; qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); } DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size); out: return rc; } static int qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs) { if (!cbs || (!cbs->rx_comp_cb || !cbs->rx_release_cb || !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie)) return -EINVAL; p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb; p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb; p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb; p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb; p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb; p_ll2_info->cbs.cookie = cbs->cookie; return 0; } static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn, struct qed_ll2_acquire_data *data, u8 *start_idx, u8 *last_idx) { /* LL2 queues handles will be split as follows: * First will be the legacy queues, and then the ctx based. 
*/ if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { *start_idx = QED_LL2_LEGACY_CONN_BASE_PF; *last_idx = *start_idx + QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF; } else { /* QED_LL2_RX_TYPE_CTX */ *start_idx = QED_LL2_CTX_CONN_BASE_PF; *last_idx = *start_idx + QED_MAX_NUM_OF_CTX_LL2_CONNS_PF; } } static enum core_error_handle qed_ll2_get_error_choice(enum qed_ll2_error_handle err) { switch (err) { case QED_LL2_DROP_PACKET: return LL2_DROP_PACKET; case QED_LL2_DO_NOTHING: return LL2_DO_NOTHING; case QED_LL2_ASSERT: return LL2_ASSERT; default: return LL2_DO_NOTHING; } } int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) { struct qed_hwfn *p_hwfn = cxt; qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; struct qed_ll2_info *p_ll2_info = NULL; u8 i, first_idx, last_idx, *p_tx_max; int rc; if (!data->p_connection_handle || !p_hwfn->p_ll2_info) return -EINVAL; _qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx); /* Find a free connection to be used */ for (i = first_idx; i < last_idx; i++) { mutex_lock(&p_hwfn->p_ll2_info[i].mutex); if (p_hwfn->p_ll2_info[i].b_active) { mutex_unlock(&p_hwfn->p_ll2_info[i].mutex); continue; } p_hwfn->p_ll2_info[i].b_active = true; p_ll2_info = &p_hwfn->p_ll2_info[i]; mutex_unlock(&p_hwfn->p_ll2_info[i].mutex); break; } if (!p_ll2_info) return -EBUSY; memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input)); switch (data->input.tx_dest) { case QED_LL2_TX_DEST_NW: p_ll2_info->tx_dest = CORE_TX_DEST_NW; break; case QED_LL2_TX_DEST_LB: p_ll2_info->tx_dest = CORE_TX_DEST_LB; break; case QED_LL2_TX_DEST_DROP: p_ll2_info->tx_dest = CORE_TX_DEST_DROP; break; default: return -EINVAL; } if (data->input.conn_type == QED_LL2_TYPE_OOO || data->input.secondary_queue) p_ll2_info->main_func_queue = false; else p_ll2_info->main_func_queue = true; /* Correct maximum number of Tx BDs */ p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet; if (*p_tx_max == 0) *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET; else *p_tx_max = min_t(u8, *p_tx_max, CORE_LL2_TX_MAX_BDS_PER_PACKET); rc = qed_ll2_set_cbs(p_ll2_info, data->cbs); if (rc) { DP_NOTICE(p_hwfn, "Invalid callback functions\n"); goto q_allocate_fail; } rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info); if (rc) goto q_allocate_fail; rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info); if (rc) goto q_allocate_fail; rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, data->input.mtu); if (rc) goto q_allocate_fail; /* Register callbacks for the Rx/Tx queues */ if (data->input.conn_type == QED_LL2_TYPE_OOO) { comp_rx_cb = qed_ll2_lb_rxq_completion; comp_tx_cb = qed_ll2_lb_txq_completion; } else { comp_rx_cb = qed_ll2_rxq_completion; comp_tx_cb = qed_ll2_txq_completion; } if (data->input.rx_num_desc) { qed_int_register_cb(p_hwfn, comp_rx_cb, &p_hwfn->p_ll2_info[i], &p_ll2_info->rx_queue.rx_sb_index, &p_ll2_info->rx_queue.p_fw_cons); p_ll2_info->rx_queue.b_cb_registered = true; } if (data->input.tx_num_desc) { qed_int_register_cb(p_hwfn, comp_tx_cb, &p_hwfn->p_ll2_info[i], &p_ll2_info->tx_queue.tx_sb_index, &p_ll2_info->tx_queue.p_fw_cons); p_ll2_info->tx_queue.b_cb_registered = true; } *data->p_connection_handle = i; return rc; q_allocate_fail: qed_ll2_release_connection(p_hwfn, i); return -ENOMEM; } static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { enum qed_ll2_error_handle error_input; enum core_error_handle error_mode; u8 action_on_error = 0; int rc; if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) return 0; 
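	/* Zero the Rx producer, encode the per-error policies (packet too big,
	 * no buffer) into action_on_error, and start the Rx queue via ramrod;
	 * ctx-based queues additionally register their producer doorbell for
	 * doorbell recovery.
	 */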
DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0); error_input = p_ll2_conn->input.ai_err_packet_too_big; error_mode = qed_ll2_get_error_choice(error_input); SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode); error_input = p_ll2_conn->input.ai_err_no_buf; error_mode = qed_ll2_get_error_choice(error_input); SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode); rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); if (rc) return rc; if (p_ll2_conn->rx_queue.ctx_based) { rc = qed_db_recovery_add(p_hwfn->cdev, p_ll2_conn->rx_queue.set_prod_addr, &p_ll2_conn->rx_queue.db_data, DB_REC_WIDTH_64B, DB_REC_KERNEL); } return rc; } static void qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) return; qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); } static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn, u8 handle, u8 ll2_queue_type) { u8 qid; if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle; /* QED_LL2_RX_TYPE_CTX * FW distinguishes between the legacy queues (ram based) and the * ctx based queues by the queue_id. * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy * and the queue ids above that are ctx base. */ qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] + MAX_NUM_LL2_RX_RAM_QUEUES; /* See comment on the acquire connection for how the ll2 * queues handles are divided. */ qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF); return qid; } int qed_ll2_establish_connection(void *cxt, u8 connection_handle) { struct core_conn_context *p_cxt; struct qed_ll2_tx_packet *p_pkt; struct qed_ll2_info *p_ll2_conn; struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_rx_queue *p_rx; struct qed_ll2_tx_queue *p_tx; struct qed_cxt_info cxt_info; struct qed_ptt *p_ptt; int rc = -EINVAL; u32 i, capacity; size_t desc_size; u8 qid, stats_id; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EAGAIN; p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); if (!p_ll2_conn) { rc = -EINVAL; goto out; } p_rx = &p_ll2_conn->rx_queue; p_tx = &p_ll2_conn->tx_queue; qed_chain_reset(&p_rx->rxq_chain); qed_chain_reset(&p_rx->rcq_chain); INIT_LIST_HEAD(&p_rx->active_descq); INIT_LIST_HEAD(&p_rx->free_descq); INIT_LIST_HEAD(&p_rx->posting_descq); spin_lock_init(&p_rx->lock); capacity = qed_chain_get_capacity(&p_rx->rxq_chain); for (i = 0; i < capacity; i++) list_add_tail(&p_rx->descq_array[i].list_entry, &p_rx->free_descq); *p_rx->p_fw_cons = 0; qed_chain_reset(&p_tx->txq_chain); INIT_LIST_HEAD(&p_tx->active_descq); INIT_LIST_HEAD(&p_tx->free_descq); INIT_LIST_HEAD(&p_tx->sending_descq); spin_lock_init(&p_tx->lock); capacity = qed_chain_get_capacity(&p_tx->txq_chain); /* All bds_set elements are flexibily added. 
*/ desc_size = struct_size(p_pkt, bds_set, p_ll2_conn->input.tx_max_bds_per_packet); for (i = 0; i < capacity; i++) { p_pkt = p_tx->descq_mem + desc_size * i; list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); } p_tx->cur_completing_bd_idx = 0; p_tx->bds_idx = 0; p_tx->b_completing_packet = false; p_tx->cur_send_packet = NULL; p_tx->cur_send_frag_num = 0; p_tx->cur_completing_frag_num = 0; *p_tx->p_fw_cons = 0; rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid); if (rc) goto out; cxt_info.iid = p_ll2_conn->cid; rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); if (rc) { DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n", p_ll2_conn->cid); goto out; } p_cxt = cxt_info.p_cxt; memset(p_cxt, 0, sizeof(*p_cxt)); qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle, p_ll2_conn->input.rx_conn_type); stats_id = qed_ll2_handle_to_stats_id(p_hwfn, p_ll2_conn->input.rx_conn_type, qid); p_ll2_conn->queue_id = qid; p_ll2_conn->tx_stats_id = stats_id; /* If there is no valid stats id for this connection, disable stats */ if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) { p_ll2_conn->tx_stats_en = 0; DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Disabling stats for queue %d - not enough counters\n", qid); } DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n", p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid, stats_id); if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, TSTORM_LL2_RX_PRODS, qid); } else { /* QED_LL2_RX_TYPE_CTX - using doorbell */ p_rx->ctx_based = 1; p_rx->set_prod_addr = p_hwfn->doorbells + p_hwfn->dpi_start_offset + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE); /* prepare db data */ p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid); SET_FIELD(p_rx->db_data.params, CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET); SET_FIELD(p_rx->db_data.params, CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0); } p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells + qed_db_addr(p_ll2_conn->cid, DQ_DEMS_LEGACY); /* prepare db data */ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, DQ_XCM_CORE_TX_BD_PROD_CMD); p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); if (rc) goto out; rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn); if (rc) goto out; if (!QED_IS_RDMA_PERSONALITY(p_hwfn) && !QED_IS_NVMETCP_PERSONALITY(p_hwfn)) qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1); qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) qed_llh_add_protocol_filter(p_hwfn->cdev, 0, QED_LLH_FILTER_ETHERTYPE, ETH_P_FCOE, 0); qed_llh_add_protocol_filter(p_hwfn->cdev, 0, QED_LLH_FILTER_ETHERTYPE, ETH_P_FIP, 0); } out: qed_ptt_release(p_hwfn, p_ptt); return rc; } static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, struct qed_ll2_rx_queue *p_rx, struct qed_ll2_rx_packet *p_curp) { struct qed_ll2_rx_packet *p_posting_packet = NULL; struct core_ll2_rx_prod rx_prod = { 0, 0 }; bool b_notify_fw = false; u16 bd_prod, cq_prod; /* This handles the flushing of already posted buffers */ while (!list_empty(&p_rx->posting_descq)) { p_posting_packet = list_first_entry(&p_rx->posting_descq, struct qed_ll2_rx_packet, 
list_entry); list_move_tail(&p_posting_packet->list_entry, &p_rx->active_descq); b_notify_fw = true; } /* This handles the supplied packet [if there is one] */ if (p_curp) { list_add_tail(&p_curp->list_entry, &p_rx->active_descq); b_notify_fw = true; } if (!b_notify_fw) return; bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain); cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); if (p_rx->ctx_based) { /* update producer by giving a doorbell */ p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod); p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod); /* Make sure chain element is updated before ringing the * doorbell */ dma_wmb(); DIRECT_REG_WR64(p_rx->set_prod_addr, *((u64 *)&p_rx->db_data)); } else { rx_prod.bd_prod = cpu_to_le16(bd_prod); rx_prod.cqe_prod = cpu_to_le16(cq_prod); /* Make sure chain element is updated before ringing the * doorbell */ dma_wmb(); DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); } } int qed_ll2_post_rx_buffer(void *cxt, u8 connection_handle, dma_addr_t addr, u16 buf_len, void *cookie, u8 notify_fw) { struct qed_hwfn *p_hwfn = cxt; struct core_rx_bd_with_buff_len *p_curb = NULL; struct qed_ll2_rx_packet *p_curp = NULL; struct qed_ll2_info *p_ll2_conn; struct qed_ll2_rx_queue *p_rx; unsigned long flags; void *p_data; int rc = 0; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); if (!p_ll2_conn) return -EINVAL; p_rx = &p_ll2_conn->rx_queue; if (!p_rx->set_prod_addr) return -EIO; spin_lock_irqsave(&p_rx->lock, flags); if (!list_empty(&p_rx->free_descq)) p_curp = list_first_entry(&p_rx->free_descq, struct qed_ll2_rx_packet, list_entry); if (p_curp) { if (qed_chain_get_elem_left(&p_rx->rxq_chain) && qed_chain_get_elem_left(&p_rx->rcq_chain)) { p_data = qed_chain_produce(&p_rx->rxq_chain); p_curb = (struct core_rx_bd_with_buff_len *)p_data; qed_chain_produce(&p_rx->rcq_chain); } } /* If we're lacking entries, let's try to flush buffers to FW */ if (!p_curp || !p_curb) { rc = -EBUSY; p_curp = NULL; goto out_notify; } /* We have an Rx packet we can fill */ DMA_REGPAIR_LE(p_curb->addr, addr); p_curb->buff_length = cpu_to_le16(buf_len); p_curp->rx_buf_addr = addr; p_curp->cookie = cookie; p_curp->rxq_bd = p_curb; p_curp->buf_length = buf_len; list_del(&p_curp->list_entry); /* Check if we only want to enqueue this packet without informing FW */ if (!notify_fw) { list_add_tail(&p_curp->list_entry, &p_rx->posting_descq); goto out; } out_notify: qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp); out: spin_unlock_irqrestore(&p_rx->lock, flags); return rc; } static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn, struct qed_ll2_tx_queue *p_tx, struct qed_ll2_tx_packet *p_curp, struct qed_ll2_tx_pkt_info *pkt, u8 notify_fw) { list_del(&p_curp->list_entry); p_curp->cookie = pkt->cookie; p_curp->bd_used = pkt->num_of_bds; p_curp->notify_fw = notify_fw; p_tx->cur_send_packet = p_curp; p_tx->cur_send_frag_num = 0; p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag; p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len; p_tx->cur_send_frag_num++; } static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2, struct qed_ll2_tx_packet *p_curp, struct qed_ll2_tx_pkt_info *pkt) { struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain); struct core_tx_bd *start_bd = NULL; enum core_roce_flavor_type roce_flavor; enum core_tx_dest tx_dest; u16 bd_data = 0, frag_idx; u16 bitfield1; roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? 
CORE_ROCE : CORE_RROCE; switch (pkt->tx_dest) { case QED_LL2_TX_DEST_NW: tx_dest = CORE_TX_DEST_NW; break; case QED_LL2_TX_DEST_LB: tx_dest = CORE_TX_DEST_LB; break; case QED_LL2_TX_DEST_DROP: tx_dest = CORE_TX_DEST_DROP; break; default: tx_dest = CORE_TX_DEST_LB; break; } start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) && p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); } else { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && p_ll2->input.conn_type == QED_LL2_TYPE_FCOE) pkt->remove_stag = true; } bitfield1 = le16_to_cpu(start_bd->bitfield1); SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w); SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest); start_bd->bitfield1 = cpu_to_le16(bitfield1); bd_data |= pkt->bd_flags; SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION, !!(pkt->remove_stag)); start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); DP_VERBOSE(p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", p_ll2->queue_id, p_ll2->cid, p_ll2->input.conn_type, prod_idx, pkt->first_frag_len, pkt->num_of_bds, le32_to_cpu(start_bd->addr.hi), le32_to_cpu(start_bd->addr.lo)); if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds) return; /* Need to provide the packet with additional BDs for frags */ for (frag_idx = p_ll2->tx_queue.cur_send_frag_num; frag_idx < pkt->num_of_bds; frag_idx++) { struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); (*p_bd)->bd_data.as_bitfield = 0; (*p_bd)->bitfield1 = 0; p_curp->bds_set[frag_idx].tx_frag = 0; p_curp->bds_set[frag_idx].frag_len = 0; } } /* This should be called while the Txq spinlock is being held */ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_ll2_tx_packet *p_pkt = NULL; u16 bd_prod; /* If there are missing BDs, don't do anything now */ if (p_ll2_conn->tx_queue.cur_send_frag_num != p_ll2_conn->tx_queue.cur_send_packet->bd_used) return; /* Push the current packet to the list and clean after it */ list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry, &p_ll2_conn->tx_queue.sending_descq); p_ll2_conn->tx_queue.cur_send_packet = NULL; p_ll2_conn->tx_queue.cur_send_frag_num = 0; /* Notify FW of packet only if requested to */ if (!b_notify) return; bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain); while (!list_empty(&p_tx->sending_descq)) { p_pkt = list_first_entry(&p_tx->sending_descq, struct qed_ll2_tx_packet, list_entry); if (!p_pkt) break; list_move_tail(&p_pkt->list_entry, &p_tx->active_descq); } p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod); /* Make sure the BDs data is updated 
before ringing the doorbell */ wmb(); DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg)); DP_VERBOSE(p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", p_ll2_conn->queue_id, p_ll2_conn->cid, p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod); } int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, struct qed_ll2_tx_pkt_info *pkt, bool notify_fw) { struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_tx_packet *p_curp = NULL; struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_tx_queue *p_tx; struct qed_chain *p_tx_chain; unsigned long flags; int rc = 0; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); if (unlikely(!p_ll2_conn)) return -EINVAL; p_tx = &p_ll2_conn->tx_queue; p_tx_chain = &p_tx->txq_chain; if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)) return -EIO; spin_lock_irqsave(&p_tx->lock, flags); if (unlikely(p_tx->cur_send_packet)) { rc = -EEXIST; goto out; } /* Get entry, but only if we have tx elements for it */ if (unlikely(!list_empty(&p_tx->free_descq))) p_curp = list_first_entry(&p_tx->free_descq, struct qed_ll2_tx_packet, list_entry); if (unlikely(p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)) p_curp = NULL; if (unlikely(!p_curp)) { rc = -EBUSY; goto out; } /* Prepare packet and BD, and perhaps send a doorbell to FW */ qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw); qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt); qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); out: spin_unlock_irqrestore(&p_tx->lock, flags); return rc; } int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes) { struct qed_ll2_tx_packet *p_cur_send_packet = NULL; struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; u16 cur_send_frag_num = 0; struct core_tx_bd *p_bd; unsigned long flags; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); if (unlikely(!p_ll2_conn)) return -EINVAL; if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet)) return -EINVAL; p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet; cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num; if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used)) return -EINVAL; /* Fill the BD information, and possibly notify FW */ p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd; DMA_REGPAIR_LE(p_bd->addr, addr); p_bd->nbytes = cpu_to_le16(nbytes); p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr; p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes; p_ll2_conn->tx_queue.cur_send_frag_num++; spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags); qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags); return 0; } int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) { struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; int rc = -EINVAL; struct qed_ptt *p_ptt; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EAGAIN; p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); if (!p_ll2_conn) { rc = -EINVAL; goto out; } /* Stop Tx & Rx of connection, if needed */ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { p_ll2_conn->tx_queue.b_cb_registered = false; smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); if (rc) goto out; qed_ll2_txq_flush(p_hwfn, connection_handle); qed_int_unregister_cb(p_hwfn, 
p_ll2_conn->tx_queue.tx_sb_index); } if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { p_ll2_conn->rx_queue.b_cb_registered = false; smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ if (p_ll2_conn->rx_queue.ctx_based) qed_db_recovery_del(p_hwfn->cdev, p_ll2_conn->rx_queue.set_prod_addr, &p_ll2_conn->rx_queue.db_data); rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); if (rc) goto out; qed_ll2_rxq_flush(p_hwfn, connection_handle); qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); } if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) qed_llh_remove_protocol_filter(p_hwfn->cdev, 0, QED_LLH_FILTER_ETHERTYPE, ETH_P_FCOE, 0); qed_llh_remove_protocol_filter(p_hwfn->cdev, 0, QED_LLH_FILTER_ETHERTYPE, ETH_P_FIP, 0); } out: qed_ptt_release(p_hwfn, p_ptt); return rc; } static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { struct qed_ooo_buffer *p_buffer; if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) return; qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, p_hwfn->p_ooo_info))) { dma_free_coherent(&p_hwfn->cdev->pdev->dev, p_buffer->rx_buffer_size, p_buffer->rx_buffer_virt_addr, p_buffer->rx_buffer_phys_addr); kfree(p_buffer); } } void qed_ll2_release_connection(void *cxt, u8 connection_handle) { struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); if (!p_ll2_conn) return; kfree(p_ll2_conn->tx_queue.descq_mem); qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); kfree(p_ll2_conn->rx_queue.descq_array); qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain); qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain); qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid); qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn); mutex_lock(&p_ll2_conn->mutex); p_ll2_conn->b_active = false; mutex_unlock(&p_ll2_conn->mutex); } int qed_ll2_alloc(struct qed_hwfn *p_hwfn) { struct qed_ll2_info *p_ll2_connections; u8 i; /* Allocate LL2's set struct */ p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS, sizeof(struct qed_ll2_info), GFP_KERNEL); if (!p_ll2_connections) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n"); return -ENOMEM; } for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) p_ll2_connections[i].my_id = i; p_hwfn->p_ll2_info = p_ll2_connections; return 0; } void qed_ll2_setup(struct qed_hwfn *p_hwfn) { int i; for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) mutex_init(&p_hwfn->p_ll2_info[i].mutex); } void qed_ll2_free(struct qed_hwfn *p_hwfn) { if (!p_hwfn->p_ll2_info) return; kfree(p_hwfn->p_ll2_info); p_hwfn->p_ll2_info = NULL; } static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_ll2_stats *p_stats) { struct core_ll2_port_stats port_stats; memset(&port_stats, 0, sizeof(port_stats)); qed_memcpy_from(p_hwfn, p_ptt, &port_stats, BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)), sizeof(port_stats)); p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr); p_stats->gsi_invalid_pkt_length += HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length); p_stats->gsi_unsupported_pkt_typ += HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ); p_stats->gsi_crcchksm_error += HILO_64_REGPAIR(port_stats.gsi_crcchksm_error); } 
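/* The helpers below read the per-queue LL2 counters kept by firmware in the
 * TSTORM/USTORM/PSTORM storm RAM and accumulate them into struct
 * qed_ll2_stats; the Pstorm (Tx) counters are indexed by tx_stats_id rather
 * than by queue_id.
 */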
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_ll2_info *p_ll2_conn, struct qed_ll2_stats *p_stats) { struct core_ll2_tstorm_per_queue_stat tstats; u8 qid = p_ll2_conn->queue_id; u32 tstats_addr; memset(&tstats, 0, sizeof(tstats)); tstats_addr = BAR0_MAP_REG_TSDM_RAM + CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid); qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); p_stats->packet_too_big_discard += HILO_64_REGPAIR(tstats.packet_too_big_discard); p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard); } static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_ll2_info *p_ll2_conn, struct qed_ll2_stats *p_stats) { struct core_ll2_ustorm_per_queue_stat ustats; u8 qid = p_ll2_conn->queue_id; u32 ustats_addr; memset(&ustats, 0, sizeof(ustats)); ustats_addr = BAR0_MAP_REG_USDM_RAM + CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid); qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats)); p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); } static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_ll2_info *p_ll2_conn, struct qed_ll2_stats *p_stats) { struct core_ll2_pstorm_per_queue_stat pstats; u8 stats_id = p_ll2_conn->tx_stats_id; u32 pstats_addr; memset(&pstats, 0, sizeof(pstats)); pstats_addr = BAR0_MAP_REG_PSDM_RAM + CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id); qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); } static int __qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats) { struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ptt *p_ptt; if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) || !p_hwfn->p_ll2_info) return -EINVAL; p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); return -EINVAL; } if (p_ll2_conn->input.gsi_enable) _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats); _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats); if (p_ll2_conn->tx_stats_en) _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); qed_ptt_release(p_hwfn, p_ptt); return 0; } int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats) { memset(p_stats, 0, sizeof(*p_stats)); return __qed_ll2_get_stats(cxt, connection_handle, p_stats); } static void qed_ll2b_release_rx_packet(void *cxt, u8 connection_handle, void *cookie, dma_addr_t rx_buf_addr, bool b_last_packet) { struct qed_hwfn *p_hwfn = cxt; qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie); } static void qed_ll2_register_cb_ops(struct qed_dev *cdev, const struct qed_ll2_cb_ops *ops, 
void *cookie) { cdev->ll2->cbs = ops; cdev->ll2->cb_cookie = cookie; } static struct qed_ll2_cbs ll2_cbs = { .rx_comp_cb = &qed_ll2b_complete_rx_packet, .rx_release_cb = &qed_ll2b_release_rx_packet, .tx_comp_cb = &qed_ll2b_complete_tx_packet, .tx_release_cb = &qed_ll2b_complete_tx_packet, }; static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn, struct qed_ll2_acquire_data *data, struct qed_ll2_params *params, enum qed_ll2_conn_type conn_type, u8 *handle, bool lb) { memset(data, 0, sizeof(*data)); data->input.conn_type = conn_type; data->input.mtu = params->mtu; data->input.rx_num_desc = QED_LL2_RX_SIZE; data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets; data->input.rx_vlan_removal_en = params->rx_vlan_stripping; data->input.tx_num_desc = QED_LL2_TX_SIZE; data->p_connection_handle = handle; data->cbs = &ll2_cbs; ll2_cbs.cookie = p_hwfn; if (lb) { data->input.tx_tc = PKT_LB_TC; data->input.tx_dest = QED_LL2_TX_DEST_LB; } else { data->input.tx_tc = 0; data->input.tx_dest = QED_LL2_TX_DEST_NW; } } static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn, struct qed_ll2_params *params) { u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; struct qed_ll2_acquire_data data; int rc; qed_ll2_set_conn_data(p_hwfn, &data, params, QED_LL2_TYPE_OOO, handle, true); rc = qed_ll2_acquire_connection(p_hwfn, &data); if (rc) { DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n"); goto out; } rc = qed_ll2_establish_connection(p_hwfn, *handle); if (rc) { DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n"); goto fail; } return 0; fail: qed_ll2_release_connection(p_hwfn, *handle); out: *handle = QED_LL2_UNUSED_HANDLE; return rc; } static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev) { return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) || QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) || QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) && (QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev)); } static int __qed_ll2_stop(struct qed_hwfn *p_hwfn) { struct qed_dev *cdev = p_hwfn->cdev; int rc; rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle); if (rc) DP_INFO(cdev, "Failed to terminate LL2 connection\n"); qed_ll2_release_connection(p_hwfn, cdev->ll2->handle); return rc; } static int qed_ll2_stop(struct qed_dev *cdev) { bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); int rc = 0, rc2 = 0; if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE) return 0; if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address); qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address); eth_zero_addr(cdev->ll2_mac_address); if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) qed_ll2_stop_ooo(p_hwfn); /* In CMT mode, LL2 is always started on engine 0 for a storage PF */ if (b_is_storage_eng1) { rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev)); if (rc2) DP_NOTICE(QED_LEADING_HWFN(cdev), "Failed to stop LL2 on engine 0\n"); } rc = __qed_ll2_stop(p_hwfn); if (rc) DP_NOTICE(p_hwfn, "Failed to stop LL2\n"); qed_ll2_kill_buffers(cdev); cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; return rc | rc2; } static int __qed_ll2_start(struct qed_hwfn *p_hwfn, struct qed_ll2_params *params) { struct qed_ll2_buffer *buffer, *tmp_buffer; struct qed_dev *cdev = p_hwfn->cdev; enum qed_ll2_conn_type conn_type; struct qed_ll2_acquire_data data; int rc, rx_cnt; switch (p_hwfn->hw_info.personality) { case QED_PCI_FCOE: conn_type = QED_LL2_TYPE_FCOE; break; case QED_PCI_ISCSI: case QED_PCI_NVMETCP: 
		conn_type = QED_LL2_TYPE_TCP_ULP;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
		goto release_conn;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	rx_cnt = cdev->ll2->rx_cnt;
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(p_hwfn,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (rx_cnt == cdev->ll2->rx_cnt) {
		DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
		goto terminate_conn;
	}
	cdev->ll2->rx_cnt = rx_cnt;

	return 0;

terminate_conn:
	qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
release_conn:
	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
	return rc;
}

static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_buffer *buffer;
	int rx_num_desc, i, rc;

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_NOTICE(cdev, "Invalid Ethernet address\n");
		return -EINVAL;
	}

	WARN_ON(!cdev->ll2->cbs);

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);

	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2.
	 * In CMT mode, in case of a storage PF which is affinitized to engine 1,
	 * LL2 is started also on engine 0 and thus we need twofold buffers.
	 */
	rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
	DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
		rx_num_desc, cdev->ll2->rx_size);
	for (i = 0; i < rx_num_desc; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			rc = -ENOMEM;
			goto err0;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto err0;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	rc = __qed_ll2_start(p_hwfn, params);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start LL2\n");
		goto err0;
	}

	/* In CMT mode, always need to start LL2 on engine 0 for a storage PF,
	 * since broadcast/multicast packets are routed to engine 0.
*/ if (b_is_storage_eng1) { rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params); if (rc) { DP_NOTICE(QED_LEADING_HWFN(cdev), "Failed to start LL2 on engine 0\n"); goto err1; } } if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n"); rc = qed_ll2_start_ooo(p_hwfn, params); if (rc) { DP_NOTICE(cdev, "Failed to start OOO LL2\n"); goto err2; } } if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) { rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address); if (rc) { DP_NOTICE(cdev, "Failed to add an LLH filter\n"); goto err3; } } ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); return 0; err3: if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) qed_ll2_stop_ooo(p_hwfn); err2: if (b_is_storage_eng1) __qed_ll2_stop(QED_LEADING_HWFN(cdev)); err1: __qed_ll2_stop(p_hwfn); err0: qed_ll2_kill_buffers(cdev); cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; return rc; } static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, unsigned long xmit_flags) { struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; u8 flags = 0, nr_frags; int rc = -EINVAL, i; dma_addr_t mapping; u16 vlan = 0; if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); return -EINVAL; } /* Cache number of fragments from SKB since SKB may be freed by * the completion routine after calling qed_ll2_prepare_tx_packet() */ nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) { DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", 1 + nr_frags); return -EINVAL; } mapping = dma_map_single(&cdev->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { DP_NOTICE(cdev, "SKB mapping failed\n"); return -EINVAL; } /* Request HW to calculate IP csum */ if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) && ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT); if (skb_vlan_tag_present(skb)) { vlan = skb_vlan_tag_get(skb); flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT); } memset(&pkt, 0, sizeof(pkt)); pkt.num_of_bds = 1 + nr_frags; pkt.vlan = vlan; pkt.bd_flags = flags; pkt.tx_dest = QED_LL2_TX_DEST_NW; pkt.first_frag = mapping; pkt.first_frag_len = skb->len; pkt.cookie = skb; if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) && test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) pkt.remove_stag = true; /* qed_ll2_prepare_tx_packet() may actually send the packet if * there are no fragments in the skb and subsequently the completion * routine may run and free the SKB, so no dereferencing the SKB * beyond this point unless skb has any fragments. 
*/ rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle, &pkt, 1); if (unlikely(rc)) goto err; for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { DP_NOTICE(cdev, "Unable to map frag - dropping packet\n"); rc = -ENOMEM; goto err; } rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, cdev->ll2->handle, mapping, skb_frag_size(frag)); /* if failed not much to do here, partial packet has been posted * we can't free memory, will need to wait for completion */ if (unlikely(rc)) goto err2; } return 0; err: dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE); err2: return rc; } static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats) { bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); int rc; if (!cdev->ll2) return -EINVAL; rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats); if (rc) { DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n"); return rc; } /* In CMT mode, LL2 is always started on engine 0 for a storage PF */ if (b_is_storage_eng1) { rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev), cdev->ll2->handle, stats); if (rc) { DP_NOTICE(QED_LEADING_HWFN(cdev), "Failed to get LL2 stats on engine 0\n"); return rc; } } return 0; } const struct qed_ll2_ops qed_ll2_ops_pass = { .start = &qed_ll2_start, .stop = &qed_ll2_stop, .start_xmit = &qed_ll2_start_xmit, .register_cb_ops = &qed_ll2_register_cb_ops, .get_stats = &qed_ll2_stats, }; int qed_ll2_alloc_if(struct qed_dev *cdev) { cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL); return cdev->ll2 ? 0 : -ENOMEM; } void qed_ll2_dealloc_if(struct qed_dev *cdev) { kfree(cdev->ll2); cdev->ll2 = NULL; }
linux-master
drivers/net/ethernet/qlogic/qed/qed_ll2.c
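A minimal user-space sketch of the transmit BD-flag selection performed by qed_ll2_start_xmit() in qed_ll2.c above. The DEMO_* shift values and the demo_tx_bd_flags() helper are illustrative stand-ins (assumptions), not the driver's CORE_TX_BD_DATA_* definitions; only the shape of the decision is taken from the source: request a hardware IP checksum unless the frame is IPv6 whose next header is again IPv6 (NEXTHDR_IPV6), and request VLAN insertion only when the skb carries a VLAN tag.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the CORE_TX_BD_DATA_* shifts used by the real
 * driver; the actual values live in the qed HSI headers. */
#define DEMO_BD_IP_CSUM_SHIFT        0U
#define DEMO_BD_VLAN_INSERTION_SHIFT 1U

/* Mirrors the flag selection in qed_ll2_start_xmit(): hardware IP checksum
 * is requested unless the packet is IPv6 with nexthdr == NEXTHDR_IPV6, and
 * VLAN insertion is requested only for VLAN-tagged skbs. */
static uint8_t demo_tx_bd_flags(bool ipv6_in_ipv6, bool has_vlan_tag)
{
        uint8_t flags = 0;

        if (!ipv6_in_ipv6)
                flags |= 1U << DEMO_BD_IP_CSUM_SHIFT;
        if (has_vlan_tag)
                flags |= 1U << DEMO_BD_VLAN_INSERTION_SHIFT;

        return flags;
}

int main(void)
{
        printf("IPv4, untagged      : 0x%02x\n",
               (unsigned int)demo_tx_bd_flags(false, false));
        printf("IPv6-in-IPv6, tagged: 0x%02x\n",
               (unsigned int)demo_tx_bd_flags(true, true));
        return 0;
}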
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* Copyright 2021 Marvell. All rights reserved. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/types.h> #include <asm/byteorder.h> #include <linux/qed/common_hsi.h> #include <linux/qed/storage_common.h> #include <linux/qed/nvmetcp_common.h> #include <linux/qed/qed_nvmetcp_if.h> #include "qed_nvmetcp_fw_funcs.h" #define NVMETCP_NUM_SGES_IN_CACHE 0x4 bool nvmetcp_is_slow_sgl(u16 num_sges, bool small_mid_sge) { return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge); } void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params, struct scsi_cached_sges *ctx_data_desc, struct storage_sgl_task_params *sgl_params) { u8 num_sges_to_init = (u8)(sgl_params->num_sges > NVMETCP_NUM_SGES_IN_CACHE ? NVMETCP_NUM_SGES_IN_CACHE : sgl_params->num_sges); u8 sge_index; /* sgl params */ ctx_sgl_params->sgl_addr.lo = cpu_to_le32(sgl_params->sgl_phys_addr.lo); ctx_sgl_params->sgl_addr.hi = cpu_to_le32(sgl_params->sgl_phys_addr.hi); ctx_sgl_params->sgl_total_length = cpu_to_le32(sgl_params->total_buffer_size); ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_params->num_sges); for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) { ctx_data_desc->sge[sge_index].sge_addr.lo = cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.lo); ctx_data_desc->sge[sge_index].sge_addr.hi = cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.hi); ctx_data_desc->sge[sge_index].sge_len = cpu_to_le32(sgl_params->sgl[sge_index].sge_len); } } static inline u32 calc_rw_task_size(struct nvmetcp_task_params *task_params, enum nvmetcp_task_type task_type) { u32 io_size; if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) io_size = task_params->tx_io_size; else io_size = task_params->rx_io_size; if (unlikely(!io_size)) return 0; return io_size; } static inline void init_sqe(struct nvmetcp_task_params *task_params, struct storage_sgl_task_params *sgl_task_params, enum nvmetcp_task_type task_type) { if (!task_params->sqe) return; memset(task_params->sqe, 0, sizeof(*task_params->sqe)); task_params->sqe->task_id = cpu_to_le16(task_params->itid); switch (task_type) { case NVMETCP_TASK_TYPE_HOST_WRITE: { u32 buf_size = 0; u32 num_sges = 0; SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1); SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, NVMETCP_WQE_TYPE_NORMAL); if (task_params->tx_io_size) { if (task_params->send_write_incapsule) buf_size = calc_rw_task_size(task_params, task_type); if (nvmetcp_is_slow_sgl(sgl_task_params->num_sges, sgl_task_params->small_mid_sge)) num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO; else num_sges = min((u16)sgl_task_params->num_sges, (u16)SCSI_NUM_SGES_SLOW_SGL_THR); } SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges); SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size); } break; case NVMETCP_TASK_TYPE_HOST_READ: { SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, NVMETCP_WQE_TYPE_NORMAL); SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1); } break; case NVMETCP_TASK_TYPE_INIT_CONN_REQUEST: { SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, NVMETCP_WQE_TYPE_MIDDLE_PATH); if (task_params->tx_io_size) { SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, task_params->tx_io_size); SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, min((u16)sgl_task_params->num_sges, (u16)SCSI_NUM_SGES_SLOW_SGL_THR)); } } break; 
case NVMETCP_TASK_TYPE_CLEANUP: SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, NVMETCP_WQE_TYPE_TASK_CLEANUP); default: break; } } /* The following function initializes of NVMeTCP task params */ static inline void init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context, struct nvmetcp_task_params *task_params, enum nvmetcp_task_type task_type) { context->ystorm_st_context.state.cccid = task_params->host_cccid; SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1); context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo); context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi); } /* The following function initializes default values to all tasks */ static inline void init_default_nvmetcp_task(struct nvmetcp_task_params *task_params, void *pdu_header, void *nvme_cmd, enum nvmetcp_task_type task_type) { struct e5_nvmetcp_task_context *context = task_params->context; const u8 val_byte = context->mstorm_ag_context.cdu_validation; u8 dw_index; memset(context, 0, sizeof(*context)); init_nvmetcp_task_params(context, task_params, (enum nvmetcp_task_type)task_type); /* Swapping requirements used below, will be removed in future FW versions */ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE || task_type == NVMETCP_TASK_TYPE_HOST_READ) { for (dw_index = 0; dw_index < QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32); dw_index++) context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] = cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index])); for (dw_index = QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32); dw_index < QED_NVMETCP_CMD_HDR_SIZE / sizeof(u32); dw_index++) context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] = cpu_to_le32(__swab32(((u32 *)nvme_cmd)[dw_index - 2])); } else { for (dw_index = 0; dw_index < QED_NVMETCP_NON_IO_HDR_SIZE / sizeof(u32); dw_index++) context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] = cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index])); } /* M-Storm Context: */ context->mstorm_ag_context.cdu_validation = val_byte; context->mstorm_st_context.task_type = (u8)(task_type); context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid); /* Ustorm Context: */ SET_FIELD(context->ustorm_ag_context.flags1, E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV, 1); context->ustorm_st_context.task_type = (u8)(task_type); context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number; context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid); } /* The following function initializes the U-Storm Task Contexts */ static inline void init_ustorm_task_contexts(struct ustorm_nvmetcp_task_st_ctx *ustorm_st_context, struct e5_ustorm_nvmetcp_task_ag_ctx *ustorm_ag_context, u32 remaining_recv_len, u32 expected_data_transfer_len, u8 num_sges, bool tx_dif_conn_err_en) { /* Remaining data to be received in bytes. Used in validations*/ ustorm_st_context->rem_rcv_len = cpu_to_le32(remaining_recv_len); ustorm_ag_context->exp_data_acked = cpu_to_le32(expected_data_transfer_len); ustorm_st_context->exp_data_transfer_len = cpu_to_le32(expected_data_transfer_len); SET_FIELD(ustorm_st_context->reg1_map, REG1_NUM_SGES, num_sges); SET_FIELD(ustorm_ag_context->flags2, E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN, tx_dif_conn_err_en ? 
1 : 0); } /* The following function initializes Local Completion Contexts: */ static inline void set_local_completion_context(struct e5_nvmetcp_task_context *context) { SET_FIELD(context->ystorm_st_context.state.flags, YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP, 1); SET_FIELD(context->ustorm_st_context.flags, USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP, 1); } /* Common Fastpath task init function: */ static inline void init_rw_nvmetcp_task(struct nvmetcp_task_params *task_params, enum nvmetcp_task_type task_type, void *pdu_header, void *nvme_cmd, struct storage_sgl_task_params *sgl_task_params) { struct e5_nvmetcp_task_context *context = task_params->context; u32 task_size = calc_rw_task_size(task_params, task_type); bool slow_io = false; u8 num_sges = 0; init_default_nvmetcp_task(task_params, pdu_header, nvme_cmd, task_type); /* Tx/Rx: */ if (task_params->tx_io_size) { /* if data to transmit: */ init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params, &context->ystorm_st_context.state.data_desc, sgl_task_params); slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges, sgl_task_params->small_mid_sge); num_sges = (u8)(!slow_io ? min((u32)sgl_task_params->num_sges, (u32)SCSI_NUM_SGES_SLOW_SGL_THR) : NVMETCP_WQE_NUM_SGES_SLOWIO); if (slow_io) { SET_FIELD(context->ystorm_st_context.state.flags, YSTORM_NVMETCP_TASK_STATE_SLOW_IO, 1); } } else if (task_params->rx_io_size) { /* if data to receive: */ init_scsi_sgl_context(&context->mstorm_st_context.sgl_params, &context->mstorm_st_context.data_desc, sgl_task_params); num_sges = (u8)(!nvmetcp_is_slow_sgl(sgl_task_params->num_sges, sgl_task_params->small_mid_sge) ? min((u32)sgl_task_params->num_sges, (u32)SCSI_NUM_SGES_SLOW_SGL_THR) : NVMETCP_WQE_NUM_SGES_SLOWIO); context->mstorm_st_context.rem_task_size = cpu_to_le32(task_size); } /* Ustorm context: */ init_ustorm_task_contexts(&context->ustorm_st_context, &context->ustorm_ag_context, /* Remaining Receive length is the Task Size */ task_size, /* The size of the transmitted task */ task_size, /* num_sges */ num_sges, false); /* Set exp_data_acked */ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) { if (task_params->send_write_incapsule) context->ustorm_ag_context.exp_data_acked = task_size; else context->ustorm_ag_context.exp_data_acked = 0; } else if (task_type == NVMETCP_TASK_TYPE_HOST_READ) { context->ustorm_ag_context.exp_data_acked = 0; } context->ustorm_ag_context.exp_cont_len = 0; init_sqe(task_params, sgl_task_params, task_type); } static void init_common_initiator_read_task(struct nvmetcp_task_params *task_params, struct nvme_tcp_cmd_pdu *cmd_pdu_header, struct nvme_command *nvme_cmd, struct storage_sgl_task_params *sgl_task_params) { init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_READ, cmd_pdu_header, nvme_cmd, sgl_task_params); } void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params, struct nvme_tcp_cmd_pdu *cmd_pdu_header, struct nvme_command *nvme_cmd, struct storage_sgl_task_params *sgl_task_params) { init_common_initiator_read_task(task_params, (void *)cmd_pdu_header, (void *)nvme_cmd, sgl_task_params); } static void init_common_initiator_write_task(struct nvmetcp_task_params *task_params, struct nvme_tcp_cmd_pdu *cmd_pdu_header, struct nvme_command *nvme_cmd, struct storage_sgl_task_params *sgl_task_params) { init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_WRITE, cmd_pdu_header, nvme_cmd, sgl_task_params); } void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params, struct nvme_tcp_cmd_pdu *cmd_pdu_header, struct nvme_command 
*nvme_cmd, struct storage_sgl_task_params *sgl_task_params) { init_common_initiator_write_task(task_params, (void *)cmd_pdu_header, (void *)nvme_cmd, sgl_task_params); } static void init_common_login_request_task(struct nvmetcp_task_params *task_params, void *login_req_pdu_header, struct storage_sgl_task_params *tx_sgl_task_params, struct storage_sgl_task_params *rx_sgl_task_params) { struct e5_nvmetcp_task_context *context = task_params->context; init_default_nvmetcp_task(task_params, (void *)login_req_pdu_header, NULL, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST); /* Ustorm Context: */ init_ustorm_task_contexts(&context->ustorm_st_context, &context->ustorm_ag_context, /* Remaining Receive length is the Task Size */ task_params->rx_io_size ? rx_sgl_task_params->total_buffer_size : 0, /* The size of the transmitted task */ task_params->tx_io_size ? tx_sgl_task_params->total_buffer_size : 0, 0, /* num_sges */ 0); /* tx_dif_conn_err_en */ /* SGL context: */ if (task_params->tx_io_size) init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params, &context->ystorm_st_context.state.data_desc, tx_sgl_task_params); if (task_params->rx_io_size) init_scsi_sgl_context(&context->mstorm_st_context.sgl_params, &context->mstorm_st_context.data_desc, rx_sgl_task_params); context->mstorm_st_context.rem_task_size = cpu_to_le32(task_params->rx_io_size ? rx_sgl_task_params->total_buffer_size : 0); init_sqe(task_params, tx_sgl_task_params, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST); } /* The following function initializes Login task in Host mode: */ void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params, struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr, struct storage_sgl_task_params *tx_sgl_task_params, struct storage_sgl_task_params *rx_sgl_task_params) { init_common_login_request_task(task_params, init_conn_req_pdu_hdr, tx_sgl_task_params, rx_sgl_task_params); } void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params) { init_sqe(task_params, NULL, NVMETCP_TASK_TYPE_CLEANUP); }
linux-master
drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
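A self-contained sketch of the fast/slow SGL decision that init_sqe() and init_rw_nvmetcp_task() in qed_nvmetcp_fw_funcs.c above apply when choosing the WQE SGE count. DEMO_SLOW_SGL_THR and DEMO_NUM_SGES_SLOWIO are assumed placeholders for SCSI_NUM_SGES_SLOW_SGL_THR and NVMETCP_WQE_NUM_SGES_SLOWIO; only the shape of the logic is taken from the source.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SLOW_SGL_THR    8U    /* placeholder threshold */
#define DEMO_NUM_SGES_SLOWIO 0xfU  /* placeholder "slow I/O" SGE marker */

/* Same shape as nvmetcp_is_slow_sgl(): a long SGL forces the slow path only
 * when it also contains small middle SGEs. */
static bool demo_is_slow_sgl(uint16_t num_sges, bool small_mid_sge)
{
        return num_sges > DEMO_SLOW_SGL_THR && small_mid_sge;
}

/* Mirrors the WQE SGE-count choice: the slow-I/O marker for slow SGLs,
 * otherwise min(num_sges, threshold). */
static unsigned int demo_wqe_num_sges(uint16_t num_sges, bool small_mid_sge)
{
        if (demo_is_slow_sgl(num_sges, small_mid_sge))
                return DEMO_NUM_SGES_SLOWIO;

        return num_sges < DEMO_SLOW_SGL_THR ? num_sges : DEMO_SLOW_SGL_THR;
}

int main(void)
{
        printf("4 SGEs             -> %u\n", demo_wqe_num_sges(4, false));
        printf("12 SGEs, large mid -> %u\n", demo_wqe_num_sges(12, false));
        printf("12 SGEs, small mid -> %u\n", demo_wqe_num_sges(12, true));
        return 0;
}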
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" #include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_iscsi.h" #include "qed_mcp.h" #include "qed_ooo.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" #include "qed_rdma.h" /*************************************************************************** * Structures & Definitions ***************************************************************************/ #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) #define SPQ_BLOCK_DELAY_MAX_ITER (10) #define SPQ_BLOCK_DELAY_US (10) #define SPQ_BLOCK_SLEEP_MAX_ITER (1000) #define SPQ_BLOCK_SLEEP_MS (5) /*************************************************************************** * Blocking Imp. (BLOCK/EBLOCK mode) ***************************************************************************/ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, void *cookie, union event_ring_data *data, u8 fw_return_code) { struct qed_spq_comp_done *comp_done; comp_done = (struct qed_spq_comp_done *)cookie; comp_done->fw_return_code = fw_return_code; /* Make sure completion done is visible on waiting thread */ smp_store_release(&comp_done->done, 0x1); } static int __qed_spq_block(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *p_fw_ret, bool sleep_between_iter) { struct qed_spq_comp_done *comp_done; u32 iter_cnt; comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER : SPQ_BLOCK_DELAY_MAX_ITER; while (iter_cnt--) { /* Validate we receive completion update */ if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */ if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; return 0; } if (sleep_between_iter) msleep(SPQ_BLOCK_SLEEP_MS); else udelay(SPQ_BLOCK_DELAY_US); } return -EBUSY; } static int qed_spq_block(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *p_fw_ret, bool skip_quick_poll) { struct qed_spq_comp_done *comp_done; struct qed_ptt *p_ptt; int rc; /* A relatively short polling period w/o sleeping, to allow the FW to * complete the ramrod and thus possibly to avoid the following sleeps. 
*/ if (!skip_quick_poll) { rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false); if (!rc) return 0; } /* Move to polling with a sleeping period between iterations */ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); if (!rc) return 0; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_NOTICE(p_hwfn, "ptt, failed to acquire\n"); return -EAGAIN; } DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); rc = qed_mcp_drain(p_hwfn, p_ptt); qed_ptt_release(p_hwfn, p_ptt); if (rc) { DP_NOTICE(p_hwfn, "MCP drain failed\n"); goto err; } /* Retry after drain */ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); if (!rc) return 0; comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; if (comp_done->done == 1) { if (p_fw_ret) *p_fw_ret = comp_done->fw_return_code; return 0; } err: p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EBUSY; qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL, "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n", le32_to_cpu(p_ent->elem.hdr.cid), qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, p_ent->elem.hdr.cmd_id), p_ent->elem.hdr.cmd_id, qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), p_ent->elem.hdr.protocol_id, le16_to_cpu(p_ent->elem.hdr.echo)); qed_ptt_release(p_hwfn, p_ptt); return -EBUSY; } /*************************************************************************** * SPQ entries inner API ***************************************************************************/ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { p_ent->flags = 0; switch (p_ent->comp_mode) { case QED_SPQ_MODE_EBLOCK: case QED_SPQ_MODE_BLOCK: p_ent->comp_cb.function = qed_spq_blocking_cb; break; case QED_SPQ_MODE_CB: break; default: DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", p_ent->comp_mode); return -EINVAL; } DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n", p_ent->elem.hdr.cid, qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, p_ent->elem.hdr.cmd_id), p_ent->elem.hdr.cmd_id, qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), p_ent->elem.hdr.protocol_id, p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo, D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", "MODE_CB")); return 0; } /*************************************************************************** * HSI access ***************************************************************************/ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq) { struct core_conn_context *p_cxt; struct qed_cxt_info cxt_info; u16 physical_q; int rc; cxt_info.iid = p_spq->cid; rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); if (rc < 0) { DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n", p_spq->cid); return; } p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->xstorm_ag_context.flags10, XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags9, XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); /* QM physical queue */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q); p_cxt->xstorm_st_context.spq_base_addr.lo = DMA_LO_LE(p_spq->chain.p_phys_addr); p_cxt->xstorm_st_context.spq_base_addr.hi = DMA_HI_LE(p_spq->chain.p_phys_addr); } static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq, struct qed_spq_entry *p_ent) { struct qed_chain *p_chain 
= &p_hwfn->p_spq->chain; struct core_db_data *p_db_data = &p_spq->db_data; u16 echo = qed_chain_get_prod_idx(p_chain); struct slow_path_element *elem; p_ent->elem.hdr.echo = cpu_to_le16(echo); elem = qed_chain_produce(p_chain); if (!elem) { DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); return -EINVAL; } *elem = p_ent->elem; /* struct assignment */ /* send a doorbell on the slow hwfn session */ p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); /* make sure the SPQE is updated before the doorbell */ wmb(); DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data); /* make sure doorbell is rang */ wmb(); DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n", p_spq->db_addr_offset, p_spq->cid, p_db_data->params, p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain)); return 0; } /*************************************************************************** * Asynchronous events ***************************************************************************/ static int qed_async_event_completion(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) { qed_spq_async_comp_cb cb; if (!p_hwfn->p_spq) return -EINVAL; if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) { DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n", qed_get_protocol_type_str(p_eqe->protocol_id), p_eqe->protocol_id); return -EINVAL; } cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id]; if (cb) { return cb(p_hwfn, p_eqe->opcode, p_eqe->echo, &p_eqe->data, p_eqe->fw_return_code); } else { DP_NOTICE(p_hwfn, "Unknown Async completion for %s:%d\n", qed_get_protocol_type_str(p_eqe->protocol_id), p_eqe->protocol_id); return -EINVAL; } } int qed_spq_register_async_cb(struct qed_hwfn *p_hwfn, enum protocol_type protocol_id, qed_spq_async_comp_cb cb) { if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) return -EINVAL; p_hwfn->p_spq->async_comp_cb[protocol_id] = cb; return 0; } void qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn, enum protocol_type protocol_id) { if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) return; p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL; } /*************************************************************************** * EQ API ***************************************************************************/ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) { u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, USTORM_EQE_CONS, p_hwfn->rel_pf_id); REG_WR16(p_hwfn, addr, prod); } int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) { struct qed_eq *p_eq = cookie; struct qed_chain *p_chain = &p_eq->chain; int rc = 0; /* take a snapshot of the FW consumer */ u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons); DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx); /* Need to guarantee the fw_cons index we use points to a usuable * element (to comply with our chain), so our macros would comply */ if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) == qed_chain_get_usable_per_page(p_chain)) fw_cons_idx += qed_chain_get_unusable_per_page(p_chain); /* Complete current segment of eq entries */ while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) { struct event_ring_entry *p_eqe = qed_chain_consume(p_chain); if (!p_eqe) { rc = -EINVAL; break; } DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "op %x prot %x res0 %x echo %x fwret %x flags %x\n", p_eqe->opcode, p_eqe->protocol_id, p_eqe->reserved0, le16_to_cpu(p_eqe->echo), p_eqe->fw_return_code, p_eqe->flags); if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) { if 
(qed_async_event_completion(p_hwfn, p_eqe)) rc = -EINVAL; } else if (qed_spq_completion(p_hwfn, p_eqe->echo, p_eqe->fw_return_code, &p_eqe->data)) { rc = -EINVAL; } qed_chain_recycle_consumed(p_chain); } qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); /* Attempt to post pending requests */ spin_lock_bh(&p_hwfn->p_spq->lock); rc = qed_spq_pend_post(p_hwfn); spin_unlock_bh(&p_hwfn->p_spq->lock); return rc; } int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) { struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .intended_use = QED_CHAIN_USE_TO_PRODUCE, .cnt_type = QED_CHAIN_CNT_TYPE_U16, .num_elems = num_elem, .elem_size = sizeof(union event_ring_element), }; struct qed_eq *p_eq; int ret; /* Allocate EQ struct */ p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL); if (!p_eq) return -ENOMEM; ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params); if (ret) { DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n"); goto eq_allocate_fail; } /* register EQ completion on the SP SB */ qed_int_register_cb(p_hwfn, qed_eq_completion, p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons); p_hwfn->p_eq = p_eq; return 0; eq_allocate_fail: kfree(p_eq); return ret; } void qed_eq_setup(struct qed_hwfn *p_hwfn) { qed_chain_reset(&p_hwfn->p_eq->chain); } void qed_eq_free(struct qed_hwfn *p_hwfn) { if (!p_hwfn->p_eq) return; qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain); kfree(p_hwfn->p_eq); p_hwfn->p_eq = NULL; } /*************************************************************************** * CQE API - manipulate EQ functionality ***************************************************************************/ static int qed_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe, enum protocol_type protocol) { if (IS_VF(p_hwfn->cdev)) return 0; /* @@@tmp - it's possible we'll eventually want to handle some * actual commands that can arrive here, but for now this is only * used to complete the ramrod using the echo value on the cqe */ return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL); } int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe) { int rc; rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); if (rc) DP_NOTICE(p_hwfn, "Failed to handle RXQ CQE [cmd 0x%02x]\n", cqe->ramrod_cmd_id); return rc; } /*************************************************************************** * Slow hwfn Queue (spq) ***************************************************************************/ void qed_spq_setup(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_virt = NULL; struct core_db_data *p_db_data; void __iomem *db_addr; dma_addr_t p_phys = 0; u32 i, capacity; int rc; INIT_LIST_HEAD(&p_spq->pending); INIT_LIST_HEAD(&p_spq->completion_pending); INIT_LIST_HEAD(&p_spq->free_pool); INIT_LIST_HEAD(&p_spq->unlimited_pending); spin_lock_init(&p_spq->lock); /* SPQ empty pool */ p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod); p_virt = p_spq->p_virt; capacity = qed_chain_get_capacity(&p_spq->chain); for (i = 0; i < capacity; i++) { DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys); list_add_tail(&p_virt->list, &p_spq->free_pool); p_virt++; p_phys += sizeof(struct qed_spq_entry); } /* Statistics */ p_spq->normal_count = 0; p_spq->comp_count = 0; p_spq->comp_sent_count = 0; p_spq->unlimited_pending_count = 0; bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE); p_spq->comp_bitmap_idx = 0; /* SPQ cid, cannot fail */ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); qed_spq_hw_initialize(p_hwfn, p_spq); /* 
reset the chain itself */ qed_chain_reset(&p_spq->chain); /* Initialize the address/data of the SPQ doorbell */ p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY); p_db_data = &p_spq->db_data; memset(p_db_data, 0, sizeof(*p_db_data)); SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM); SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX); SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL, DQ_XCM_CORE_SPQ_PROD_CMD); p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD; /* Register the SPQ doorbell with the doorbell recovery mechanism */ db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + p_spq->db_addr_offset); rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data, DB_REC_WIDTH_32B, DB_REC_KERNEL); if (rc) DP_INFO(p_hwfn, "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n"); } int qed_spq_alloc(struct qed_hwfn *p_hwfn) { struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_SINGLE, .intended_use = QED_CHAIN_USE_TO_PRODUCE, .cnt_type = QED_CHAIN_CNT_TYPE_U16, .elem_size = sizeof(struct slow_path_element), }; struct qed_dev *cdev = p_hwfn->cdev; struct qed_spq_entry *p_virt = NULL; struct qed_spq *p_spq = NULL; dma_addr_t p_phys = 0; u32 capacity; int ret; /* SPQ struct */ p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL); if (!p_spq) return -ENOMEM; /* SPQ ring */ ret = qed_chain_alloc(cdev, &p_spq->chain, &params); if (ret) { DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n"); goto spq_chain_alloc_fail; } /* allocate and fill the SPQ elements (incl. ramrod data list) */ capacity = qed_chain_get_capacity(&p_spq->chain); ret = -ENOMEM; p_virt = dma_alloc_coherent(&cdev->pdev->dev, capacity * sizeof(struct qed_spq_entry), &p_phys, GFP_KERNEL); if (!p_virt) goto spq_alloc_fail; p_spq->p_virt = p_virt; p_spq->p_phys = p_phys; p_hwfn->p_spq = p_spq; return 0; spq_alloc_fail: qed_chain_free(cdev, &p_spq->chain); spq_chain_alloc_fail: kfree(p_spq); return ret; } void qed_spq_free(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; void __iomem *db_addr; u32 capacity; if (!p_spq) return; /* Delete the SPQ doorbell from the doorbell recovery mechanism */ db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + p_spq->db_addr_offset); qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data); if (p_spq->p_virt) { capacity = qed_chain_get_capacity(&p_spq->chain); dma_free_coherent(&p_hwfn->cdev->pdev->dev, capacity * sizeof(struct qed_spq_entry), p_spq->p_virt, p_spq->p_phys); } qed_chain_free(p_hwfn->cdev, &p_spq->chain); kfree(p_spq); p_hwfn->p_spq = NULL; } int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_ent = NULL; int rc = 0; spin_lock_bh(&p_spq->lock); if (list_empty(&p_spq->free_pool)) { p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC); if (!p_ent) { DP_NOTICE(p_hwfn, "Failed to allocate an SPQ entry for a pending ramrod\n"); rc = -ENOMEM; goto out_unlock; } p_ent->queue = &p_spq->unlimited_pending; } else { p_ent = list_first_entry(&p_spq->free_pool, struct qed_spq_entry, list); list_del(&p_ent->list); p_ent->queue = &p_spq->pending; } *pp_ent = p_ent; out_unlock: spin_unlock_bh(&p_spq->lock); return rc; } /* Locked variant; Should be called while the SPQ lock is taken */ static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool); } void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { 
spin_lock_bh(&p_hwfn->p_spq->lock); __qed_spq_return_entry(p_hwfn, p_ent); spin_unlock_bh(&p_hwfn->p_spq->lock); } /** * qed_spq_add_entry() - Add a new entry to the pending list. * Should be used while lock is being held. * * @p_hwfn: HW device data. * @p_ent: An entry to add. * @priority: Desired priority. * * Adds an entry to the pending list is there is room (an empty * element is available in the free_pool), or else places the * entry in the unlimited_pending pool. * * Return: zero on success, -EINVAL on invalid @priority. */ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, enum spq_priority priority) { struct qed_spq *p_spq = p_hwfn->p_spq; if (p_ent->queue == &p_spq->unlimited_pending) { if (list_empty(&p_spq->free_pool)) { list_add_tail(&p_ent->list, &p_spq->unlimited_pending); p_spq->unlimited_pending_count++; return 0; } else { struct qed_spq_entry *p_en2; p_en2 = list_first_entry(&p_spq->free_pool, struct qed_spq_entry, list); list_del(&p_en2->list); /* Copy the ring element physical pointer to the new * entry, since we are about to override the entire ring * entry and don't want to lose the pointer. */ p_ent->elem.data_ptr = p_en2->elem.data_ptr; *p_en2 = *p_ent; /* EBLOCK responsible to free the allocated p_ent */ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) kfree(p_ent); else p_ent->post_ent = p_en2; p_ent = p_en2; } } /* entry is to be placed in 'pending' queue */ switch (priority) { case QED_SPQ_PRIORITY_NORMAL: list_add_tail(&p_ent->list, &p_spq->pending); p_spq->normal_count++; break; case QED_SPQ_PRIORITY_HIGH: list_add(&p_ent->list, &p_spq->pending); p_spq->high_count++; break; default: return -EINVAL; } return 0; } /*************************************************************************** * Accessor ***************************************************************************/ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) { if (!p_hwfn->p_spq) return 0xffffffff; /* illegal */ return p_hwfn->p_spq->cid; } /*************************************************************************** * Posting new Ramrods ***************************************************************************/ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, struct list_head *head, u32 keep_reserve) { struct qed_spq *p_spq = p_hwfn->p_spq; int rc; while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve && !list_empty(head)) { struct qed_spq_entry *p_ent = list_first_entry(head, struct qed_spq_entry, list); list_move_tail(&p_ent->list, &p_spq->completion_pending); p_spq->comp_sent_count++; rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent); if (rc) { list_del(&p_ent->list); __qed_spq_return_entry(p_hwfn, p_ent); return rc; } } return 0; } int qed_spq_pend_post(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_ent = NULL; while (!list_empty(&p_spq->free_pool)) { if (list_empty(&p_spq->unlimited_pending)) break; p_ent = list_first_entry(&p_spq->unlimited_pending, struct qed_spq_entry, list); if (!p_ent) return -EINVAL; list_del(&p_ent->list); qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); } return qed_spq_post_list(p_hwfn, &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT); } static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent, u8 *fw_return_code) { if (!fw_return_code) return; if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE || p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP) *fw_return_code = RDMA_RETURN_OK; } /* Avoid overriding of SPQ entries when getting out-of-order completions, by * marking the completions 
in a bitmap and increasing the chain consumer only * for the first successive completed entries. */ static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo) { u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; struct qed_spq *p_spq = p_hwfn->p_spq; __set_bit(pos, p_spq->p_comp_bitmap); while (test_bit(p_spq->comp_bitmap_idx, p_spq->p_comp_bitmap)) { __clear_bit(p_spq->comp_bitmap_idx, p_spq->p_comp_bitmap); p_spq->comp_bitmap_idx++; qed_chain_return_produced(&p_spq->chain); } } int qed_spq_post(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *fw_return_code) { int rc = 0; struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; bool b_ret_ent = true; bool eblock; if (!p_hwfn) return -EINVAL; if (!p_ent) { DP_NOTICE(p_hwfn, "Got a NULL pointer\n"); return -EINVAL; } if (p_hwfn->cdev->recov_in_prog) { DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n", qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, p_ent->elem.hdr.cmd_id), p_ent->elem.hdr.cmd_id, qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), p_ent->elem.hdr.protocol_id); /* Let the flow complete w/o any error handling */ qed_spq_recov_set_ret_code(p_ent, fw_return_code); return 0; } /* Complete the entry */ rc = qed_spq_fill_entry(p_hwfn, p_ent); spin_lock_bh(&p_spq->lock); /* Check return value after LOCK is taken for cleaner error flow */ if (rc) goto spq_post_fail; /* Check if entry is in block mode before qed_spq_add_entry, * which might kfree p_ent. */ eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK); /* Add the request to the pending queue */ rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); if (rc) goto spq_post_fail; rc = qed_spq_pend_post(p_hwfn); if (rc) { /* Since it's possible that pending failed for a different * entry [although unlikely], the failed entry was already * dealt with; No need to return it here. */ b_ret_ent = false; goto spq_post_fail; } spin_unlock_bh(&p_spq->lock); if (eblock) { /* For entries in QED BLOCK mode, the completion code cannot * perform the necessary cleanup - if it did, we couldn't * access p_ent here to see whether it's successful or not. * Thus, after gaining the answer perform the cleanup here. */ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code, p_ent->queue == &p_spq->unlimited_pending); if (p_ent->queue == &p_spq->unlimited_pending) { struct qed_spq_entry *p_post_ent = p_ent->post_ent; kfree(p_ent); /* Return the entry which was actually posted */ p_ent = p_post_ent; } if (rc) goto spq_post_fail2; /* return to pool */ qed_spq_return_entry(p_hwfn, p_ent); } return rc; spq_post_fail2: spin_lock_bh(&p_spq->lock); list_del(&p_ent->list); qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo); spq_post_fail: /* return to the free pool */ if (b_ret_ent) __qed_spq_return_entry(p_hwfn, p_ent); spin_unlock_bh(&p_spq->lock); return rc; } int qed_spq_completion(struct qed_hwfn *p_hwfn, __le16 echo, u8 fw_return_code, union event_ring_data *p_data) { struct qed_spq *p_spq; struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *tmp; struct qed_spq_entry *found = NULL; if (!p_hwfn) return -EINVAL; p_spq = p_hwfn->p_spq; if (!p_spq) return -EINVAL; spin_lock_bh(&p_spq->lock); list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) { list_del(&p_ent->list); qed_spq_comp_bmap_update(p_hwfn, echo); p_spq->comp_count++; found = p_ent; break; } /* This is relatively uncommon - depends on scenarios * which have mutliple per-PF sent ramrods. 
*/ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n", le16_to_cpu(echo), le16_to_cpu(p_ent->elem.hdr.echo)); } /* Release lock before callback, as callback may post * an additional ramrod. */ spin_unlock_bh(&p_spq->lock); if (!found) { DP_NOTICE(p_hwfn, "Failed to find an entry this EQE [echo %04x] completes\n", le16_to_cpu(echo)); return -EEXIST; } DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete EQE [echo %04x]: func %p cookie %p)\n", le16_to_cpu(echo), p_ent->comp_cb.function, p_ent->comp_cb.cookie); if (found->comp_cb.function) found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, fw_return_code); else DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Got a completion without a callback function\n"); if (found->comp_mode != QED_SPQ_MODE_EBLOCK) /* EBLOCK is responsible for returning its own entry into the * free list. */ qed_spq_return_entry(p_hwfn, found); return 0; } #define QED_SPQ_CONSQ_ELEM_SIZE 0x80 int qed_consq_alloc(struct qed_hwfn *p_hwfn) { struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .intended_use = QED_CHAIN_USE_TO_PRODUCE, .cnt_type = QED_CHAIN_CNT_TYPE_U16, .num_elems = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE, .elem_size = QED_SPQ_CONSQ_ELEM_SIZE, }; struct qed_consq *p_consq; int ret; /* Allocate ConsQ struct */ p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL); if (!p_consq) return -ENOMEM; /* Allocate and initialize ConsQ chain */ ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params); if (ret) { DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain"); goto consq_alloc_fail; } p_hwfn->p_consq = p_consq; return 0; consq_alloc_fail: kfree(p_consq); return ret; } void qed_consq_setup(struct qed_hwfn *p_hwfn) { qed_chain_reset(&p_hwfn->p_consq->chain); } void qed_consq_free(struct qed_hwfn *p_hwfn) { if (!p_hwfn->p_consq) return; qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain); kfree(p_hwfn->p_consq); p_hwfn->p_consq = NULL; }
linux-master
drivers/net/ethernet/qlogic/qed/qed_spq.c
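A stand-alone sketch of the out-of-order completion handling implemented by qed_spq_comp_bmap_update() in qed_spq.c above: completions are recorded by echo in a bitmap, and ring elements are recycled only across the leading run of contiguous completed entries. The ring size, types and counters below are illustrative assumptions, not the driver's SPQ_RING_SIZE or qed_chain API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_SIZE 8U  /* assumed ring size for illustration only */

struct demo_spq {
        bool comp_bitmap[DEMO_RING_SIZE];
        unsigned int comp_idx;      /* next in-order echo being waited for */
        unsigned int chain_returns; /* ring elements recycled so far */
};

static void demo_comp_bmap_update(struct demo_spq *spq, uint16_t echo)
{
        spq->comp_bitmap[echo % DEMO_RING_SIZE] = true;

        /* Advance only while the next expected entry has completed, so an
         * out-of-order completion never frees ring slots ahead of earlier,
         * still-pending entries. */
        while (spq->comp_bitmap[spq->comp_idx % DEMO_RING_SIZE]) {
                spq->comp_bitmap[spq->comp_idx % DEMO_RING_SIZE] = false;
                spq->comp_idx++;
                spq->chain_returns++;
        }
}

int main(void)
{
        struct demo_spq spq = { 0 };

        demo_comp_bmap_update(&spq, 1); /* out of order: nothing recycled */
        demo_comp_bmap_update(&spq, 2); /* still waiting for echo 0 */
        printf("after 1,2: recycled %u\n", spq.chain_returns); /* prints 0 */

        demo_comp_bmap_update(&spq, 0); /* gap closed: 0,1,2 recycled */
        printf("after 0  : recycled %u\n", spq.chain_returns); /* prints 3 */
        return 0;
}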
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/if_vlan.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include <linux/qed/qed_rdma_if.h> #include "qed_rdma.h" #include "qed_roce.h" #include "qed_sp.h" static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, __le16 echo, union event_ring_data *data, u8 fw_return_code) { struct qed_rdma_events events = p_hwfn->p_rdma_info->events; union rdma_eqe_data *rdata = &data->rdma_data; if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid); /* icid release in this async event can occur only if the icid * was offloaded to the FW. In case it wasn't offloaded this is * handled in qed_roce_sp_destroy_qp. */ qed_roce_free_real_icid(p_hwfn, icid); } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo); events.affiliated_event(events.context, fw_event_code, &srq_id); } else { events.affiliated_event(events.context, fw_event_code, (void *)&rdata->async_handle); } return 0; } void qed_roce_stop(struct qed_hwfn *p_hwfn) { struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map; int wait_count = 0; /* when destroying a_RoCE QP the control is returned to the user after * the synchronous part. The asynchronous part may take a little longer. * We delay for a short while if an async destroy QP is still expected. * Beyond the added delay we clear the bitmap anyway. */ while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) { /* If the HW device is during recovery, all resources are * immediately reset without receiving a per-cid indication * from HW. In this case we don't expect the cid bitmap to be * cleared. */ if (p_hwfn->cdev->recov_in_prog) return; msleep(100); if (wait_count++ > 20) { DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n"); break; } } } static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, __le32 *dst_gid) { u32 i; if (qp->roce_mode == ROCE_V2_IPV4) { /* The IPv4 addresses shall be aligned to the highest word. * The lower words must be zero. 
*/ memset(src_gid, 0, sizeof(union qed_gid)); memset(dst_gid, 0, sizeof(union qed_gid)); src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr); dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr); } else { /* GIDs and IPv6 addresses coincide in location and size */ for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) { src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]); dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]); } } } static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) { switch (roce_mode) { case ROCE_V1: return PLAIN_ROCE; case ROCE_V2_IPV4: return RROCE_IPV4; case ROCE_V2_IPV6: return RROCE_IPV6; default: return MAX_ROCE_FLAVOR; } } static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) { spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; u32 responder_icid; u32 requester_icid; int rc; spin_lock_bh(&p_hwfn->p_rdma_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map, &responder_icid); if (rc) { spin_unlock_bh(&p_rdma_info->lock); return rc; } rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map, &requester_icid); spin_unlock_bh(&p_rdma_info->lock); if (rc) goto err; /* the two icid's should be adjacent */ if ((requester_icid - responder_icid) != 1) { DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n"); rc = -EINVAL; goto err; } responder_icid += qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto); requester_icid += qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto); /* If these icids require a new ILT line allocate DMA-able context for * an ILT page */ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid); if (rc) goto err; rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid); if (rc) goto err; *cid = (u16)responder_icid; return rc; err: spin_lock_bh(&p_rdma_info->lock); qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid); qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid); spin_unlock_bh(&p_rdma_info->lock); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate CID - failed, rc = %d\n", rc); return rc; } static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid) { spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { u8 pri, tc = 0; if (qp->vlan_id) { pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; tc = qed_dcbx_get_priority_tc(p_hwfn, pri); } DP_VERBOSE(p_hwfn, QED_MSG_SP, "qp icid %u tc: %u (vlan priority %s)\n", qp->icid, tc, qp->vlan_id ? 
"enabled" : "disabled"); return tc; } static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { struct roce_create_qp_resp_ramrod_data *p_ramrod; u16 regular_latency_queue, low_latency_queue; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; enum protocol_type proto; u32 flags = 0; int rc; u8 tc; if (!qp->has_resp) return 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); /* Allocate DMA-able memory for IRQ */ qp->irq_num_pages = 1; qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, RDMA_RING_PAGE_SIZE, &qp->irq_phys_addr, GFP_KERNEL); if (!qp->irq) { rc = -ENOMEM; DP_NOTICE(p_hwfn, "qed create responder failed: cannot allocate memory (irq). rc = %d\n", rc); return rc; } /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP, PROTOCOLID_ROCE, &init_data); if (rc) goto err; SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, qed_roce_mode_to_flavor(qp->roce_mode)); SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN, qp->incoming_rdma_read_en); SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN, qp->incoming_rdma_write_en); SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN, qp->incoming_atomic_en); SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, qp->e2e_flow_control_en); SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq); SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN, qp->fmr_and_reserved_lkey); SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, qp->min_rnr_nak_timer); SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, qed_rdma_is_xrc_qp(qp)); p_ramrod = &p_ent->ramrod.roce_create_qp_resp; p_ramrod->flags = cpu_to_le32(flags); p_ramrod->max_ird = qp->max_rd_atomic_resp; p_ramrod->traffic_class = qp->traffic_class_tos; p_ramrod->hop_limit = qp->hop_limit_ttl; p_ramrod->irq_num_pages = qp->irq_num_pages; p_ramrod->p_key = cpu_to_le16(qp->pkey); p_ramrod->flow_label = cpu_to_le32(qp->flow_label); p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); p_ramrod->mtu = cpu_to_le16(qp->mtu); p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn); p_ramrod->pd = cpu_to_le16(qp->pd); p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages); DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr); DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr); qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi; p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo; p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id); tc = qed_roce_get_qp_tc(p_hwfn, qp); regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); DP_VERBOSE(p_hwfn, QED_MSG_SP, "qp icid %u pqs: regular_latency %u low_latency %u\n", qp->icid, regular_latency_queue - CM_TX_PQ_BASE, low_latency_queue - CM_TX_PQ_BASE); p_ramrod->regular_latency_phy_queue = cpu_to_le16(regular_latency_queue); p_ramrod->low_latency_phy_queue = cpu_to_le16(low_latency_queue); p_ramrod->dpi = cpu_to_le16(qp->dpi); qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); 
qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port); p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err; qp->resp_offloaded = true; qp->cq_prod = 0; proto = p_hwfn->p_rdma_info->proto; qed_roce_set_real_cid(p_hwfn, qp->icid - qed_cxt_get_proto_cid_start(p_hwfn, proto)); return rc; err: DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc); dma_free_coherent(&p_hwfn->cdev->pdev->dev, qp->irq_num_pages * RDMA_RING_PAGE_SIZE, qp->irq, qp->irq_phys_addr); return rc; } static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { struct roce_create_qp_req_ramrod_data *p_ramrod; u16 regular_latency_queue, low_latency_queue; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; enum protocol_type proto; u16 flags = 0; int rc; u8 tc; if (!qp->has_req) return 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); /* Allocate DMA-able memory for ORQ */ qp->orq_num_pages = 1; qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, RDMA_RING_PAGE_SIZE, &qp->orq_phys_addr, GFP_KERNEL); if (!qp->orq) { rc = -ENOMEM; DP_NOTICE(p_hwfn, "qed create requester failed: cannot allocate memory (orq). rc = %d\n", rc); return rc; } /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid + 1; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP, PROTOCOLID_ROCE, &init_data); if (rc) goto err; SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, qed_roce_mode_to_flavor(qp->roce_mode)); SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN, qp->fmr_and_reserved_lkey); SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, qp->rnr_retry_cnt); SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG, qed_rdma_is_xrc_qp(qp)); p_ramrod = &p_ent->ramrod.roce_create_qp_req; p_ramrod->flags = cpu_to_le16(flags); SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode); p_ramrod->max_ord = qp->max_rd_atomic_req; p_ramrod->traffic_class = qp->traffic_class_tos; p_ramrod->hop_limit = qp->hop_limit_ttl; p_ramrod->orq_num_pages = qp->orq_num_pages; p_ramrod->p_key = cpu_to_le16(qp->pkey); p_ramrod->flow_label = cpu_to_le32(qp->flow_label); p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); p_ramrod->mtu = cpu_to_le16(qp->mtu); p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn); p_ramrod->pd = cpu_to_le16(qp->pd); p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages); DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr); DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr); qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi; p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo; p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); tc = 
qed_roce_get_qp_tc(p_hwfn, qp); regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); DP_VERBOSE(p_hwfn, QED_MSG_SP, "qp icid %u pqs: regular_latency %u low_latency %u\n", qp->icid, regular_latency_queue - CM_TX_PQ_BASE, low_latency_queue - CM_TX_PQ_BASE); p_ramrod->regular_latency_phy_queue = cpu_to_le16(regular_latency_queue); p_ramrod->low_latency_phy_queue = cpu_to_le16(low_latency_queue); p_ramrod->dpi = cpu_to_le16(qp->dpi); qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port); p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err; qp->req_offloaded = true; proto = p_hwfn->p_rdma_info->proto; qed_roce_set_real_cid(p_hwfn, qp->icid + 1 - qed_cxt_get_proto_cid_start(p_hwfn, proto)); return rc; err: DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc); dma_free_coherent(&p_hwfn->cdev->pdev->dev, qp->orq_num_pages * RDMA_RING_PAGE_SIZE, qp->orq, qp->orq_phys_addr); return rc; } static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, bool move_to_err, u32 modify_flags) { struct roce_modify_qp_resp_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u16 flags = 0; int rc; if (!qp->has_resp) return 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); if (move_to_err && !qp->resp_offloaded) return 0; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_EVENT_MODIFY_QP, PROTOCOLID_ROCE, &init_data); if (rc) { DP_NOTICE(p_hwfn, "rc = %d\n", rc); return rc; } SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, !!move_to_err); SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN, qp->incoming_rdma_read_en); SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN, qp->incoming_rdma_write_en); SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN, qp->incoming_atomic_en); SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, qp->e2e_flow_control_en); SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG, GET_FIELD(modify_flags, QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)); SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG, GET_FIELD(modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)); SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)); p_ramrod = &p_ent->ramrod.roce_modify_qp_resp; p_ramrod->flags = cpu_to_le16(flags); p_ramrod->fields = 0; SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, qp->min_rnr_nak_timer); p_ramrod->max_ird = qp->max_rd_atomic_resp; p_ramrod->traffic_class = qp->traffic_class_tos; p_ramrod->hop_limit = qp->hop_limit_ttl; p_ramrod->p_key = cpu_to_le16(qp->pkey); p_ramrod->flow_label = cpu_to_le32(qp->flow_label); p_ramrod->mtu = 
cpu_to_le16(qp->mtu); qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); rc = qed_spq_post(p_hwfn, p_ent, NULL); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc); return rc; } static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, bool move_to_sqd, bool move_to_err, u32 modify_flags) { struct roce_modify_qp_req_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u16 flags = 0; int rc; if (!qp->has_req) return 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); if (move_to_err && !(qp->req_offloaded)) return 0; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid + 1; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_EVENT_MODIFY_QP, PROTOCOLID_ROCE, &init_data); if (rc) { DP_NOTICE(p_hwfn, "rc = %d\n", rc); return rc; } SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, !!move_to_err); SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, !!move_to_sqd); SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY, qp->sqd_async); SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG, GET_FIELD(modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)); SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)); SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)); SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)); p_ramrod = &p_ent->ramrod.roce_modify_qp_req; p_ramrod->flags = cpu_to_le16(flags); p_ramrod->fields = 0; SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, qp->rnr_retry_cnt); p_ramrod->max_ord = qp->max_rd_atomic_req; p_ramrod->traffic_class = qp->traffic_class_tos; p_ramrod->hop_limit = qp->hop_limit_ttl; p_ramrod->p_key = cpu_to_le16(qp->pkey); p_ramrod->flow_label = cpu_to_le32(qp->flow_label); p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); p_ramrod->mtu = cpu_to_le16(qp->mtu); qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); rc = qed_spq_post(p_hwfn, p_ent, NULL); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc); return rc; } static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, u32 *cq_prod) { struct roce_destroy_qp_resp_output_params *p_ramrod_res; struct roce_destroy_qp_resp_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; dma_addr_t ramrod_res_phys; int rc; if (!qp->has_resp) { *cq_prod = 0; return 0; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); *cq_prod = qp->cq_prod; if (!qp->resp_offloaded) { /* If a responder was never offload, we need to free the cids * allocated in create_qp as a FW async event will never arrive */ u32 cid; cid = qp->icid - qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); qed_roce_free_cid_pair(p_hwfn, (u16)cid); return 0; } /* Get SPQ entry */ 
memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP, PROTOCOLID_ROCE, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp; p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res), &ramrod_res_phys, GFP_KERNEL); if (!p_ramrod_res) { rc = -ENOMEM; DP_NOTICE(p_hwfn, "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", rc); qed_sp_destroy_request(p_hwfn, p_ent); return rc; } DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err; *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); qp->cq_prod = *cq_prod; /* Free IRQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, qp->irq_num_pages * RDMA_RING_PAGE_SIZE, qp->irq, qp->irq_phys_addr); qp->resp_offloaded = false; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc); err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct roce_destroy_qp_resp_output_params), p_ramrod_res, ramrod_res_phys); return rc; } static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { struct roce_destroy_qp_req_output_params *p_ramrod_res; struct roce_destroy_qp_req_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; dma_addr_t ramrod_res_phys; int rc = -ENOMEM; if (!qp->has_req) return 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); if (!qp->req_offloaded) return 0; p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res), &ramrod_res_phys, GFP_KERNEL); if (!p_ramrod_res) { DP_NOTICE(p_hwfn, "qed destroy requester failed: cannot allocate memory (ramrod)\n"); return rc; } /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid + 1; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP, PROTOCOLID_ROCE, &init_data); if (rc) goto err; p_ramrod = &p_ent->ramrod.roce_destroy_qp_req; DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err; /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, qp->orq_num_pages * RDMA_RING_PAGE_SIZE, qp->orq, qp->orq_phys_addr); qp->req_offloaded = false; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc); err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res), p_ramrod_res, ramrod_res_phys); return rc; } int qed_roce_query_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *out_params) { struct roce_query_qp_resp_output_params *p_resp_ramrod_res; struct roce_query_qp_req_output_params *p_req_ramrod_res; struct roce_query_qp_resp_ramrod_data *p_resp_ramrod; struct roce_query_qp_req_ramrod_data *p_req_ramrod; struct qed_sp_init_data init_data; dma_addr_t resp_ramrod_res_phys; dma_addr_t req_ramrod_res_phys; struct qed_spq_entry *p_ent; bool rq_err_state; bool sq_err_state; bool sq_draining; int rc = -ENOMEM; if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) { /* We can't send ramrod to the fw since this qp wasn't offloaded * to the fw yet */ out_params->draining = false; out_params->rq_psn = qp->rq_psn; out_params->sq_psn = 
qp->sq_psn; out_params->state = qp->cur_state; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n"); return 0; } if (!(qp->resp_offloaded)) { DP_NOTICE(p_hwfn, "The responder's qp should be offloaded before requester's\n"); return -EINVAL; } /* Send a query responder ramrod to FW to get RQ-PSN and state */ p_resp_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), &resp_ramrod_res_phys, GFP_KERNEL); if (!p_resp_ramrod_res) { DP_NOTICE(p_hwfn, "qed query qp failed: cannot allocate memory (ramrod)\n"); return rc; } /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qp->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP, PROTOCOLID_ROCE, &init_data); if (rc) goto err_resp; p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp; DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err_resp; out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn); rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags), ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG); dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), p_resp_ramrod_res, resp_ramrod_res_phys); if (!(qp->req_offloaded)) { /* Don't send query qp for the requester */ out_params->sq_psn = qp->sq_psn; out_params->draining = false; if (rq_err_state) qp->cur_state = QED_ROCE_QP_STATE_ERR; out_params->state = qp->cur_state; return 0; } /* Send a query requester ramrod to FW to get SQ-PSN and state */ p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), &req_ramrod_res_phys, GFP_KERNEL); if (!p_req_ramrod_res) { rc = -ENOMEM; DP_NOTICE(p_hwfn, "qed query qp failed: cannot allocate memory (ramrod)\n"); return rc; } /* Get SPQ entry */ init_data.cid = qp->icid + 1; rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP, PROTOCOLID_ROCE, &init_data); if (rc) goto err_req; p_req_ramrod = &p_ent->ramrod.roce_query_qp_req; DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err_req; out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn); sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG); sq_draining = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG); dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), p_req_ramrod_res, req_ramrod_res_phys); out_params->draining = false; if (rq_err_state || sq_err_state) qp->cur_state = QED_ROCE_QP_STATE_ERR; else if (sq_draining) out_params->draining = true; out_params->state = qp->cur_state; return 0; err_req: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), p_req_ramrod_res, req_ramrod_res_phys); return rc; err_resp: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), p_resp_ramrod_res, resp_ramrod_res_phys); return rc; } int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { u32 cq_prod; int rc; /* Destroys the specified QP */ if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) && (qp->cur_state != QED_ROCE_QP_STATE_ERR) && (qp->cur_state != QED_ROCE_QP_STATE_INIT)) { DP_NOTICE(p_hwfn, "QP must be in error, reset or init state before destroying it\n"); return -EINVAL; } if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, 
&cq_prod); if (rc) return rc; /* Send destroy requester ramrod */ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); if (rc) return rc; } return 0; } int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, enum qed_roce_qp_state prev_state, struct qed_rdma_modify_qp_in_params *params) { int rc = 0; /* Perform additional operations according to the current state and the * next state */ if (((prev_state == QED_ROCE_QP_STATE_INIT) || (prev_state == QED_ROCE_QP_STATE_RESET)) && (qp->cur_state == QED_ROCE_QP_STATE_RTR)) { /* Init->RTR or Reset->RTR */ rc = qed_roce_sp_create_responder(p_hwfn, qp); return rc; } else if ((prev_state == QED_ROCE_QP_STATE_RTR) && (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { /* RTR-> RTS */ rc = qed_roce_sp_create_requester(p_hwfn, qp); if (rc) return rc; /* Send modify responder ramrod */ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, params->modify_flags); return rc; } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { /* RTS->RTS */ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, params->modify_flags); if (rc) return rc; rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, params->modify_flags); return rc; } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { /* RTS->SQD */ rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false, params->modify_flags); return rc; } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { /* SQD->SQD */ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, params->modify_flags); if (rc) return rc; rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, params->modify_flags); return rc; } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { /* SQD->RTS */ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, params->modify_flags); if (rc) return rc; rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, params->modify_flags); return rc; } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) { /* ->ERR */ rc = qed_roce_sp_modify_responder(p_hwfn, qp, true, params->modify_flags); if (rc) return rc; rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true, params->modify_flags); return rc; } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) { /* Any state -> RESET */ u32 cq_prod; /* Send destroy responder ramrod */ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &cq_prod); if (rc) return rc; qp->cq_prod = cq_prod; rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); } else { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); } return rc; } static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; u32 start_cid, cid, xcid; /* an even icid belongs to a responder while an odd icid belongs to a * requester. The 'cid' received as an input can be either. We calculate * the "partner" icid and call it xcid. Only if both are free then the * "cid" map can be cleared. 
 */
	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;
	xcid = cid ^ 1;

	spin_lock_bh(&p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
	}

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 val;

	/* if any QPs are already active, we want to disable DPM, since their
	 * context information contains information from before the latest DCBx
	 * update. Otherwise enable it.
	 */
	val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
	p_hwfn->dcbx_no_edpm = (u8)val;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
	return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
					 qed_roce_async_event);
}

int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}
linux-master
drivers/net/ethernet/qlogic/qed/qed_roce.c
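The comment that closes qed_roce_free_real_icid above describes the even/odd convention the RoCE code relies on: a responder owns the even icid, its requester partner owns the next (odd) one, and cid ^ 1 yields the partner either way, so the underlying cid pair is released only once both halves are free. The standalone sketch below illustrates just that pairing rule under those assumptions; it is not qed driver code, and the names pair_map and pair_free are invented for the example.

/* Illustrative sketch only -- standalone C, not part of the qed driver.
 * Demonstrates the even/odd pairing: releasing one half of a pair and
 * checking whether the partner (cid ^ 1) is also free before the whole
 * pair may be reclaimed.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_PAIR_CIDS 8

static bool pair_map[MAX_PAIR_CIDS];	/* true = this cid is still allocated */

/* Release one half; return true when both halves of the pair are free. */
static bool pair_free(unsigned int cid)
{
	unsigned int xcid = cid ^ 1;	/* partner cid: even <-> odd */

	pair_map[cid] = false;
	return !pair_map[xcid];		/* whole pair reclaimable? */
}

int main(void)
{
	pair_map[2] = pair_map[3] = true;	/* responder = 2, requester = 3 */

	printf("free 3 -> pair done? %d\n", pair_free(3));	/* prints 0 */
	printf("free 2 -> pair done? %d\n", pair_free(2));	/* prints 1 */
	return 0;
}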
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <asm/param.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/workqueue.h> #include <linux/bitops.h> #include <linux/bug.h> #include <linux/vmalloc.h> #include "qed.h" #include <linux/qed/qed_chain.h> #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_dev_api.h" #include <linux/qed/qed_eth_if.h> #include "qed_hsi.h" #include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_l2.h" #include "qed_mcp.h" #include "qed_ptp.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 struct qed_l2_info { u32 queues; unsigned long **pp_qid_usage; /* The lock is meant to synchronize access to the qid usage */ struct mutex lock; }; int qed_l2_alloc(struct qed_hwfn *p_hwfn) { struct qed_l2_info *p_l2_info; unsigned long **pp_qids; u32 i; if (!QED_IS_L2_PERSONALITY(p_hwfn)) return 0; p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL); if (!p_l2_info) return -ENOMEM; p_hwfn->p_l2_info = p_l2_info; if (IS_PF(p_hwfn->cdev)) { p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE); } else { u8 rx = 0, tx = 0; qed_vf_get_num_rxqs(p_hwfn, &rx); qed_vf_get_num_txqs(p_hwfn, &tx); p_l2_info->queues = max_t(u8, rx, tx); } pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *), GFP_KERNEL); if (!pp_qids) return -ENOMEM; p_l2_info->pp_qid_usage = pp_qids; for (i = 0; i < p_l2_info->queues; i++) { pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL); if (!pp_qids[i]) return -ENOMEM; } return 0; } void qed_l2_setup(struct qed_hwfn *p_hwfn) { if (!QED_IS_L2_PERSONALITY(p_hwfn)) return; mutex_init(&p_hwfn->p_l2_info->lock); } void qed_l2_free(struct qed_hwfn *p_hwfn) { u32 i; if (!QED_IS_L2_PERSONALITY(p_hwfn)) return; if (!p_hwfn->p_l2_info) return; if (!p_hwfn->p_l2_info->pp_qid_usage) goto out_l2_info; /* Free until hit first uninitialized entry */ for (i = 0; i < p_hwfn->p_l2_info->queues; i++) { if (!p_hwfn->p_l2_info->pp_qid_usage[i]) break; kfree(p_hwfn->p_l2_info->pp_qid_usage[i]); } kfree(p_hwfn->p_l2_info->pp_qid_usage); out_l2_info: kfree(p_hwfn->p_l2_info); p_hwfn->p_l2_info = NULL; } static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info; u16 queue_id = p_cid->rel.queue_id; bool b_rc = true; u8 first; mutex_lock(&p_l2_info->lock); if (queue_id >= p_l2_info->queues) { DP_NOTICE(p_hwfn, "Requested to increase usage for qzone %04x out of %08x\n", queue_id, p_l2_info->queues); b_rc = false; goto out; } first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id], MAX_QUEUES_PER_QZONE); if (first >= MAX_QUEUES_PER_QZONE) { b_rc = false; goto out; } __set_bit(first, p_l2_info->pp_qid_usage[queue_id]); p_cid->qid_usage_idx = first; out: mutex_unlock(&p_l2_info->lock); return b_rc; } static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { mutex_lock(&p_hwfn->p_l2_info->lock); clear_bit(p_cid->qid_usage_idx, p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]); mutex_unlock(&p_hwfn->p_l2_info->lock); } 
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID); if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); /* For PF's VFs we maintain the index inside queue-zone in IOV */ if (p_cid->vfid == QED_QUEUE_CID_SELF) qed_eth_queue_qid_usage_del(p_hwfn, p_cid); vfree(p_cid); } /* The internal is only meant to be directly called by PFs initializeing CIDs * for their VFs. */ static struct qed_queue_cid * _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, u16 opaque_fid, u32 cid, struct qed_queue_start_common_params *p_params, bool b_is_rx, struct qed_queue_cid_vf_params *p_vf_params) { struct qed_queue_cid *p_cid; int rc; p_cid = vzalloc(sizeof(*p_cid)); if (!p_cid) return NULL; p_cid->opaque_fid = opaque_fid; p_cid->cid = cid; p_cid->p_owner = p_hwfn; /* Fill in parameters */ p_cid->rel.vport_id = p_params->vport_id; p_cid->rel.queue_id = p_params->queue_id; p_cid->rel.stats_id = p_params->stats_id; p_cid->sb_igu_id = p_params->p_sb->igu_sb_id; p_cid->b_is_rx = b_is_rx; p_cid->sb_idx = p_params->sb_idx; /* Fill-in bits related to VFs' queues if information was provided */ if (p_vf_params) { p_cid->vfid = p_vf_params->vfid; p_cid->vf_qid = p_vf_params->vf_qid; p_cid->vf_legacy = p_vf_params->vf_legacy; } else { p_cid->vfid = QED_QUEUE_CID_SELF; } /* Don't try calculating the absolute indices for VFs */ if (IS_VF(p_hwfn->cdev)) { p_cid->abs = p_cid->rel; goto out; } /* Calculate the engine-absolute indices of the resources. * This would guarantee they're valid later on. * In some cases [SBs] we already have the right values. */ rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); if (rc) goto fail; rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id); if (rc) goto fail; /* In case of a PF configuring its VF's queues, the stats-id is already * absolute [since there's a single index that's suitable per-VF]. */ if (p_cid->vfid == QED_QUEUE_CID_SELF) { rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id, &p_cid->abs.stats_id); if (rc) goto fail; } else { p_cid->abs.stats_id = p_cid->rel.stats_id; } out: /* VF-images have provided the qid_usage_idx on their own. * Otherwise, we need to allocate a unique one. */ if (!p_vf_params) { if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid)) goto fail; } else { p_cid->qid_usage_idx = p_vf_params->qid_usage_idx; } DP_VERBOSE(p_hwfn, QED_MSG_SP, "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n", p_cid->opaque_fid, p_cid->cid, p_cid->rel.vport_id, p_cid->abs.vport_id, p_cid->rel.queue_id, p_cid->qid_usage_idx, p_cid->abs.queue_id, p_cid->rel.stats_id, p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx); return p_cid; fail: vfree(p_cid); return NULL; } struct qed_queue_cid * qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, u16 opaque_fid, struct qed_queue_start_common_params *p_params, bool b_is_rx, struct qed_queue_cid_vf_params *p_vf_params) { struct qed_queue_cid *p_cid; u8 vfid = QED_CXT_PF_CID; bool b_legacy_vf = false; u32 cid = 0; /* In case of legacy VFs, The CID can be derived from the additional * VF parameters - the VF assumes queue X uses CID X, so we can simply * use the vf_qid for this purpose as well. */ if (p_vf_params) { vfid = p_vf_params->vfid; if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) { b_legacy_vf = true; cid = p_vf_params->vf_qid; } } /* Get a unique firmware CID for this queue, in case it's a PF. 
* VF's don't need a CID as the queue configuration will be done * by PF. */ if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) { if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid, vfid)) { DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); return NULL; } } p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, p_params, b_is_rx, p_vf_params); if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf) _qed_cxt_release_cid(p_hwfn, cid, vfid); return p_cid; } static struct qed_queue_cid * qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn, u16 opaque_fid, bool b_is_rx, struct qed_queue_start_common_params *p_params) { return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx, NULL); } int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params) { struct vport_start_ramrod_data *p_ramrod = NULL; struct eth_vport_tpa_param *tpa_param; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u16 min_size, rx_mode = 0; u8 abs_vport_id = 0; int rc; rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc) return rc; memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_params->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_VPORT_START, PROTOCOLID_ETH, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.vport_start; p_ramrod->vport_id = abs_vport_id; p_ramrod->mtu = cpu_to_le16(p_params->mtu); p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; p_ramrod->drop_ttl0_en = p_params->drop_ttl0; p_ramrod->untagged = p_params->only_untagged; SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); p_ramrod->rx_mode.state = cpu_to_le16(rx_mode); /* TPA related fields */ tpa_param = &p_ramrod->tpa_param; memset(tpa_param, 0, sizeof(*tpa_param)); tpa_param->max_buff_num = p_params->max_buffers_per_cqe; switch (p_params->tpa_mode) { case QED_TPA_MODE_GRO: min_size = p_params->mtu / 2; tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; tpa_param->tpa_max_size = cpu_to_le16(U16_MAX); tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size); tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size); tpa_param->tpa_ipv4_en_flg = 1; tpa_param->tpa_ipv6_en_flg = 1; tpa_param->tpa_pkt_split_flg = 1; tpa_param->tpa_gro_consistent_flg = 1; break; default: break; } p_ramrod->tx_switching_en = p_params->tx_switching; p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, p_params->concrete_fid); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params) { if (IS_VF(p_hwfn->cdev)) { return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id, p_params->mtu, p_params->remove_inner_vlan, p_params->tpa_mode, p_params->max_buffers_per_cqe, p_params->only_untagged); } return qed_sp_eth_vport_start(p_hwfn, p_params); } static int qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, struct qed_rss_params *p_rss) { struct eth_vport_rss_config *p_config; u16 capabilities = 0; int i, table_size; int rc = 0; if (!p_rss) { p_ramrod->common.update_rss_flg = 0; return rc; } p_config = &p_ramrod->rss_config; BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE 
!= ETH_RSS_IND_TABLE_ENTRIES_NUM); rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id); if (rc) return rc; p_ramrod->common.update_rss_flg = p_rss->update_rss_config; p_config->update_rss_capabilities = p_rss->update_rss_capabilities; p_config->update_rss_ind_table = p_rss->update_rss_ind_table; p_config->update_rss_key = p_rss->update_rss_key; p_config->rss_mode = p_rss->rss_enable ? ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED; SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, !!(p_rss->rss_caps & QED_RSS_IPV4)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, !!(p_rss->rss_caps & QED_RSS_IPV6)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, !!(p_rss->rss_caps & QED_RSS_IPV4_TCP)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, !!(p_rss->rss_caps & QED_RSS_IPV6_TCP)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, !!(p_rss->rss_caps & QED_RSS_IPV4_UDP)); SET_FIELD(capabilities, ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, !!(p_rss->rss_caps & QED_RSS_IPV6_UDP)); p_config->tbl_size = p_rss->rss_table_size_log; p_config->capabilities = cpu_to_le16(capabilities); DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", p_ramrod->common.update_rss_flg, p_config->rss_mode, p_config->update_rss_capabilities, p_config->capabilities, p_config->update_rss_ind_table, p_config->update_rss_key); table_size = min_t(int, QED_RSS_IND_TABLE_SIZE, 1 << p_config->tbl_size); for (i = 0; i < table_size; i++) { struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i]; if (!p_queue) return -EINVAL; p_config->indirection_table[i] = cpu_to_le16(p_queue->abs.queue_id); } DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "Configured RSS indirection table [%d entries]:\n", table_size); for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) { DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", le16_to_cpu(p_config->indirection_table[i]), le16_to_cpu(p_config->indirection_table[i + 1]), le16_to_cpu(p_config->indirection_table[i + 2]), le16_to_cpu(p_config->indirection_table[i + 3]), le16_to_cpu(p_config->indirection_table[i + 4]), le16_to_cpu(p_config->indirection_table[i + 5]), le16_to_cpu(p_config->indirection_table[i + 6]), le16_to_cpu(p_config->indirection_table[i + 7]), le16_to_cpu(p_config->indirection_table[i + 8]), le16_to_cpu(p_config->indirection_table[i + 9]), le16_to_cpu(p_config->indirection_table[i + 10]), le16_to_cpu(p_config->indirection_table[i + 11]), le16_to_cpu(p_config->indirection_table[i + 12]), le16_to_cpu(p_config->indirection_table[i + 13]), le16_to_cpu(p_config->indirection_table[i + 14]), le16_to_cpu(p_config->indirection_table[i + 15])); } for (i = 0; i < 10; i++) p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]); return rc; } static void qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, struct qed_filter_accept_flags accept_flags) { p_ramrod->common.update_rx_mode_flg = accept_flags.update_rx_mode_config; p_ramrod->common.update_tx_mode_flg = accept_flags.update_tx_mode_config; /* Set Rx mode accept flags */ if (p_ramrod->common.update_rx_mode_flg) { u8 accept_filter = accept_flags.rx_accept_filter; u16 state = 0; SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) || !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); SET_FIELD(state, 
ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)); SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) || !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, !!(accept_filter & QED_ACCEPT_ANY_VNI)); p_ramrod->rx_mode.state = cpu_to_le16(state); DP_VERBOSE(p_hwfn, QED_MSG_SP, "p_ramrod->rx_mode.state = 0x%x\n", state); } /* Set Tx mode accept flags */ if (p_ramrod->common.update_tx_mode_flg) { u8 accept_filter = accept_flags.tx_accept_filter; u16 state = 0; SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, !!(accept_filter & QED_ACCEPT_NONE)); SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, !!(accept_filter & QED_ACCEPT_NONE)); SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); p_ramrod->tx_mode.state = cpu_to_le16(state); DP_VERBOSE(p_hwfn, QED_MSG_SP, "p_ramrod->tx_mode.state = 0x%x\n", state); } } static void qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, const struct qed_sge_tpa_params *param) { struct eth_vport_tpa_param *tpa; if (!param) { p_ramrod->common.update_tpa_param_flg = 0; p_ramrod->common.update_tpa_en_flg = 0; p_ramrod->common.update_tpa_param_flg = 0; return; } p_ramrod->common.update_tpa_en_flg = param->update_tpa_en_flg; tpa = &p_ramrod->tpa_param; tpa->tpa_ipv4_en_flg = param->tpa_ipv4_en_flg; tpa->tpa_ipv6_en_flg = param->tpa_ipv6_en_flg; tpa->tpa_ipv4_tunn_en_flg = param->tpa_ipv4_tunn_en_flg; tpa->tpa_ipv6_tunn_en_flg = param->tpa_ipv6_tunn_en_flg; p_ramrod->common.update_tpa_param_flg = param->update_tpa_param_flg; tpa->max_buff_num = param->max_buffers_per_cqe; tpa->tpa_pkt_split_flg = param->tpa_pkt_split_flg; tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg; tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg; tpa->tpa_max_aggs_num = param->tpa_max_aggs_num; tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size); tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start); tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont); } static void qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, struct qed_sp_vport_update_params *p_params) { int i; memset(&p_ramrod->approx_mcast.bins, 0, sizeof(p_ramrod->approx_mcast.bins)); if (!p_params->update_approx_mcast_flg) return; p_ramrod->common.update_approx_mcast_flg = 1; for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { u32 *p_bins = p_params->bins; p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); } } int qed_sp_vport_update(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_params, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { struct qed_rss_params *p_rss_params = p_params->rss_params; struct vport_update_ramrod_data_cmn *p_cmn; struct qed_sp_init_data init_data; struct vport_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry 
*p_ent = NULL; u8 abs_vport_id = 0, val; int rc = -EINVAL; if (IS_VF(p_hwfn->cdev)) { rc = qed_vf_pf_vport_update(p_hwfn, p_params); return rc; } rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc) return rc; memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_params->opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_VPORT_UPDATE, PROTOCOLID_ETH, &init_data); if (rc) return rc; /* Copy input params to ramrod according to FW struct */ p_ramrod = &p_ent->ramrod.vport_update; p_cmn = &p_ramrod->common; p_cmn->vport_id = abs_vport_id; p_cmn->rx_active_flg = p_params->vport_active_rx_flg; p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; p_cmn->tx_active_flg = p_params->vport_active_tx_flg; p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; p_cmn->accept_any_vlan = p_params->accept_any_vlan; val = p_params->update_accept_any_vlan_flg; p_cmn->update_accept_any_vlan_flg = val; p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; val = p_params->update_inner_vlan_removal_flg; p_cmn->update_inner_vlan_removal_en_flg = val; p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; val = p_params->update_default_vlan_enable_flg; p_cmn->update_default_vlan_en_flg = val; p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan); p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; val = p_params->update_anti_spoofing_en_flg; p_ramrod->common.update_anti_spoofing_en_flg = val; rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { qed_sp_destroy_request(p_hwfn, p_ent); return rc; } if (p_params->update_ctl_frame_check) { p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; } /* Update mcast bins for VFs, PF doesn't use this functionality */ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params); return qed_spq_post(p_hwfn, p_ent, NULL); } int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) { struct vport_stop_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u8 abs_vport_id = 0; int rc; if (IS_VF(p_hwfn->cdev)) return qed_vf_pf_vport_stop(p_hwfn); rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); if (rc) return rc; memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_VPORT_STOP, PROTOCOLID_ETH, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.vport_stop; p_ramrod->vport_id = abs_vport_id; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn, struct qed_filter_accept_flags *p_accept_flags) { struct qed_sp_vport_update_params s_params; memset(&s_params, 0, sizeof(s_params)); memcpy(&s_params.accept_flags, p_accept_flags, sizeof(struct qed_filter_accept_flags)); return qed_vf_pf_vport_update(p_hwfn, &s_params); } static int 
qed_filter_accept_cmd(struct qed_dev *cdev, u8 vport, struct qed_filter_accept_flags accept_flags, u8 update_accept_any_vlan, u8 accept_any_vlan, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { struct qed_sp_vport_update_params vport_update_params; int i, rc; /* Prepare and send the vport rx_mode change */ memset(&vport_update_params, 0, sizeof(vport_update_params)); vport_update_params.vport_id = vport; vport_update_params.accept_flags = accept_flags; vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; vport_update_params.accept_any_vlan = accept_any_vlan; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; if (IS_VF(cdev)) { rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags); if (rc) return rc; continue; } rc = qed_sp_vport_update(p_hwfn, &vport_update_params, comp_mode, p_comp_data); if (rc) { DP_ERR(cdev, "Update rx_mode failed %d\n", rc); return rc; } DP_VERBOSE(p_hwfn, QED_MSG_SP, "Accept filter configured, flags = [Rx]%x [Tx]%x\n", accept_flags.rx_accept_filter, accept_flags.tx_accept_filter); if (update_accept_any_vlan) DP_VERBOSE(p_hwfn, QED_MSG_SP, "accept_any_vlan=%d configured\n", accept_any_vlan); } return 0; } int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) { struct rx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; DP_VERBOSE(p_hwfn, QED_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_cid->cid; init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_RX_QUEUE_START, PROTOCOLID_ETH, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.rx_queue_start; p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); p_ramrod->sb_index = p_cid->sb_idx; p_ramrod->vport_id = p_cid->abs.vport_id; p_ramrod->stats_counter_id = p_cid->abs.stats_id; p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->complete_cqe_flg = 0; p_ramrod->complete_event_flg = 1; p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); if (p_cid->vfid != QED_QUEUE_CID_SELF) { bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_RX_PROD); p_ramrod->vf_rx_prod_index = p_cid->vf_qid; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n", b_legacy_vf ? 
" [legacy]" : "", p_cid->vf_qid); p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf; } return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, void __iomem **pp_prod) { u32 init_prod_val = 0; *pp_prod = (u8 __iomem *) p_hwfn->regview + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); return qed_eth_rxq_start_ramrod(p_hwfn, p_cid, bd_max_bytes, bd_chain_phys_addr, cqe_pbl_addr, cqe_pbl_size); } static int qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u16 opaque_fid, struct qed_queue_start_common_params *p_params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, struct qed_rxq_start_ret_params *p_ret_params) { struct qed_queue_cid *p_cid; int rc; /* Allocate a CID for the queue */ p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params); if (!p_cid) return -ENOMEM; if (IS_PF(p_hwfn->cdev)) { rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid, bd_max_bytes, bd_chain_phys_addr, cqe_pbl_addr, cqe_pbl_size, &p_ret_params->p_prod); } else { rc = qed_vf_pf_rxq_start(p_hwfn, p_cid, bd_max_bytes, bd_chain_phys_addr, cqe_pbl_addr, cqe_pbl_size, &p_ret_params->p_prod); } /* Provide the caller with a reference to as handler */ if (rc) qed_eth_queue_cid_release(p_hwfn, p_cid); else p_ret_params->p_handle = (void *)p_cid; return rc; } int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, void **pp_rxq_handles, u8 num_rxqs, u8 complete_cqe_flg, u8 complete_event_flg, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { struct rx_queue_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_queue_cid *p_cid; int rc = -EINVAL; u8 i; memset(&init_data, 0, sizeof(init_data)); init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_data; for (i = 0; i < num_rxqs; i++) { p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i]; /* Get SPQ entry */ init_data.cid = p_cid->cid; init_data.opaque_fid = p_cid->opaque_fid; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_RX_QUEUE_UPDATE, PROTOCOLID_ETH, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.rx_queue_update; p_ramrod->vport_id = p_cid->abs.vport_id; p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->complete_cqe_flg = complete_cqe_flg; p_ramrod->complete_event_flg = complete_event_flg; rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) return rc; } return rc; } static int qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, bool b_eq_completion_only, bool b_cqe_completion) { struct rx_queue_stop_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_cid->cid; init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_RX_QUEUE_STOP, PROTOCOLID_ETH, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.rx_queue_stop; p_ramrod->vport_id = p_cid->abs.vport_id; p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); /* Cleaning the queue requires the completion to arrive there. * In addition, VFs require the answer to come as eqe to PF. 
*/ p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) && !b_eq_completion_only) || b_cqe_completion; p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) || b_eq_completion_only; return qed_spq_post(p_hwfn, p_ent, NULL); } int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, void *p_rxq, bool eq_completion_only, bool cqe_completion) { struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq; int rc = -EINVAL; if (IS_PF(p_hwfn->cdev)) rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid, eq_completion_only, cqe_completion); else rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); if (!rc) qed_eth_queue_cid_release(p_hwfn, p_cid); return rc; } int qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id) { struct tx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_cid->cid; init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_TX_QUEUE_START, PROTOCOLID_ETH, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.tx_queue_start; p_ramrod->vport_id = p_cid->abs.vport_id; p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); p_ramrod->sb_index = p_cid->sb_idx; p_ramrod->stats_counter_id = p_cid->abs.stats_id; p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id); p_ramrod->pbl_size = cpu_to_le16(pbl_size); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); p_ramrod->qm_pq_id = cpu_to_le16(pq_id); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, u8 tc, dma_addr_t pbl_addr, u16 pbl_size, void __iomem **pp_doorbell) { int rc; rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr, pbl_size, qed_get_cm_pq_idx_mcos(p_hwfn, tc)); if (rc) return rc; /* Provide the caller with the necessary return values */ *pp_doorbell = p_hwfn->doorbells + qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY); return 0; } static int qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn, u16 opaque_fid, struct qed_queue_start_common_params *p_params, u8 tc, dma_addr_t pbl_addr, u16 pbl_size, struct qed_txq_start_ret_params *p_ret_params) { struct qed_queue_cid *p_cid; int rc; p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params); if (!p_cid) return -EINVAL; if (IS_PF(p_hwfn->cdev)) rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, pbl_addr, pbl_size, &p_ret_params->p_doorbell); else rc = qed_vf_pf_txq_start(p_hwfn, p_cid, pbl_addr, pbl_size, &p_ret_params->p_doorbell); if (rc) qed_eth_queue_cid_release(p_hwfn, p_cid); else p_ret_params->p_handle = (void *)p_cid; return rc; } static int qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_cid->cid; init_data.opaque_fid = p_cid->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_TX_QUEUE_STOP, PROTOCOLID_ETH, &init_data); if (rc) return rc; return qed_spq_post(p_hwfn, p_ent, NULL); } int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle) { struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle; int rc; if (IS_PF(p_hwfn->cdev)) rc = 
qed_eth_pf_tx_queue_stop(p_hwfn, p_cid); else rc = qed_vf_pf_txq_stop(p_hwfn, p_cid); if (!rc) qed_eth_queue_cid_release(p_hwfn, p_cid); return rc; } static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) { enum eth_filter_action action = MAX_ETH_FILTER_ACTION; switch (opcode) { case QED_FILTER_ADD: action = ETH_FILTER_ACTION_ADD; break; case QED_FILTER_REMOVE: action = ETH_FILTER_ACTION_REMOVE; break; case QED_FILTER_FLUSH: action = ETH_FILTER_ACTION_REMOVE_ALL; break; default: action = MAX_ETH_FILTER_ACTION; } return action; } static int qed_filter_ucast_common(struct qed_hwfn *p_hwfn, u16 opaque_fid, struct qed_filter_ucast *p_filter_cmd, struct vport_filter_update_ramrod_data **pp_ramrod, struct qed_spq_entry **pp_ent, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { u8 vport_to_add_to = 0, vport_to_remove_from = 0; struct vport_filter_update_ramrod_data *p_ramrod; struct eth_filter_cmd *p_first_filter; struct eth_filter_cmd *p_second_filter; struct qed_sp_init_data init_data; enum eth_filter_action action; int rc; rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, &vport_to_remove_from); if (rc) return rc; rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, &vport_to_add_to); if (rc) return rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, pp_ent, ETH_RAMROD_FILTERS_UPDATE, PROTOCOLID_ETH, &init_data); if (rc) return rc; *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; p_ramrod = *pp_ramrod; p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 
1 : 0; switch (p_filter_cmd->opcode) { case QED_FILTER_REPLACE: case QED_FILTER_MOVE: p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; default: p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break; } p_first_filter = &p_ramrod->filter_cmds[0]; p_second_filter = &p_ramrod->filter_cmds[1]; switch (p_filter_cmd->type) { case QED_FILTER_MAC: p_first_filter->type = ETH_FILTER_TYPE_MAC; break; case QED_FILTER_VLAN: p_first_filter->type = ETH_FILTER_TYPE_VLAN; break; case QED_FILTER_MAC_VLAN: p_first_filter->type = ETH_FILTER_TYPE_PAIR; break; case QED_FILTER_INNER_MAC: p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break; case QED_FILTER_INNER_VLAN: p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break; case QED_FILTER_INNER_PAIR: p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break; case QED_FILTER_INNER_MAC_VNI_PAIR: p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; break; case QED_FILTER_MAC_VNI_PAIR: p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break; case QED_FILTER_VNI: p_first_filter->type = ETH_FILTER_TYPE_VNI; break; } if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) { qed_set_fw_mac_addr(&p_first_filter->mac_msb, &p_first_filter->mac_mid, &p_first_filter->mac_lsb, (u8 *)p_filter_cmd->mac); } if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan); if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || (p_first_filter->type == ETH_FILTER_TYPE_VNI)) p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni); if (p_filter_cmd->opcode == QED_FILTER_MOVE) { p_second_filter->type = p_first_filter->type; p_second_filter->mac_msb = p_first_filter->mac_msb; p_second_filter->mac_mid = p_first_filter->mac_mid; p_second_filter->mac_lsb = p_first_filter->mac_lsb; p_second_filter->vlan_id = p_first_filter->vlan_id; p_second_filter->vni = p_first_filter->vni; p_first_filter->action = ETH_FILTER_ACTION_REMOVE; p_first_filter->vport_id = vport_to_remove_from; p_second_filter->action = ETH_FILTER_ACTION_ADD; p_second_filter->vport_id = vport_to_add_to; } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) { p_first_filter->vport_id = vport_to_add_to; memcpy(p_second_filter, p_first_filter, sizeof(*p_second_filter)); p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; p_second_filter->action = ETH_FILTER_ACTION_ADD; } else { action = qed_filter_action(p_filter_cmd->opcode); if (action == MAX_ETH_FILTER_ACTION) { DP_NOTICE(p_hwfn, "%d is not supported yet\n", p_filter_cmd->opcode); qed_sp_destroy_request(p_hwfn, *pp_ent); return -EINVAL; } p_first_filter->action = action; p_first_filter->vport_id = (p_filter_cmd->opcode == QED_FILTER_REMOVE) ? 
vport_to_remove_from : vport_to_add_to; } return 0; } int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, u16 opaque_fid, struct qed_filter_ucast *p_filter_cmd, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { struct vport_filter_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct eth_filter_cmd_header *p_header; int rc; rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, &p_ramrod, &p_ent, comp_mode, p_comp_data); if (rc) { DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); return rc; } p_header = &p_ramrod->filter_cmd_hdr; p_header->assert_on_error = p_filter_cmd->assert_on_error; rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) { DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc); return rc; } DP_VERBOSE(p_hwfn, QED_MSG_SP, "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" : ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ? "REMOVE" : ((p_filter_cmd->opcode == QED_FILTER_MOVE) ? "MOVE" : "REPLACE")), (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" : ((p_filter_cmd->type == QED_FILTER_VLAN) ? "VLAN" : "MAC & VLAN"), p_ramrod->filter_cmd_hdr.cmd_cnt, p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter); DP_VERBOSE(p_hwfn, QED_MSG_SP, "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", p_filter_cmd->vport_to_add_to, p_filter_cmd->vport_to_remove_from, p_filter_cmd->mac[0], p_filter_cmd->mac[1], p_filter_cmd->mac[2], p_filter_cmd->mac[3], p_filter_cmd->mac[4], p_filter_cmd->mac[5], p_filter_cmd->vlan); return 0; } /******************************************************************************* * Description: * Calculates crc 32 on a buffer * Note: crc32_length MUST be aligned to 8 * Return: ******************************************************************************/ static u32 qed_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed, u8 complement) { u32 byte = 0, bit = 0, crc32_result = crc32_seed; u8 msb = 0, current_byte = 0; if ((!crc32_packet) || (crc32_length == 0) || ((crc32_length % 8) != 0)) return crc32_result; for (byte = 0; byte < crc32_length; byte++) { current_byte = crc32_packet[byte]; for (bit = 0; bit < 8; bit++) { msb = (u8)(crc32_result >> 31); crc32_result = crc32_result << 1; if (msb != (0x1 & (current_byte >> bit))) { crc32_result = crc32_result ^ CRC32_POLY; crc32_result |= 1; /*crc32_result[0] = 1;*/ } } } return crc32_result; } static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len) { u32 packet_buf[2] = { 0 }; memcpy((u8 *)(&packet_buf[0]), &mac[0], 6); return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); } u8 qed_mcast_bin_from_mac(u8 *mac) { u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac, ETH_ALEN); return crc & 0xff; } static int qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, u16 opaque_fid, struct qed_filter_mcast *p_filter_cmd, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { struct vport_update_ramrod_data *p_ramrod = NULL; u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u8 abs_vport_id = 0; int rc, i; if (p_filter_cmd->opcode == QED_FILTER_ADD) rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, &abs_vport_id); else rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, &abs_vport_id); if (rc) return rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = 
p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_VPORT_UPDATE, PROTOCOLID_ETH, &init_data); if (rc) { DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); return rc; } p_ramrod = &p_ent->ramrod.vport_update; p_ramrod->common.update_approx_mcast_flg = 1; /* explicitly clear out the entire vector */ memset(&p_ramrod->approx_mcast.bins, 0, sizeof(p_ramrod->approx_mcast.bins)); memset(bins, 0, sizeof(bins)); /* filter ADD op is explicit set op and it removes * any existing filters for the vport */ if (p_filter_cmd->opcode == QED_FILTER_ADD) { for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { u32 bit, nbits; bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); nbits = sizeof(u32) * BITS_PER_BYTE; bins[bit / nbits] |= 1 << (bit % nbits); } /* Convert to correct endianity */ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { struct vport_update_ramrod_mcast *p_ramrod_bins; p_ramrod_bins = &p_ramrod->approx_mcast; p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]); } } p_ramrod->common.vport_id = abs_vport_id; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_filter_mcast_cmd(struct qed_dev *cdev, struct qed_filter_mcast *p_filter_cmd, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { int rc = 0; int i; /* only ADD and REMOVE operations are supported for multi-cast */ if ((p_filter_cmd->opcode != QED_FILTER_ADD && (p_filter_cmd->opcode != QED_FILTER_REMOVE)) || (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS)) return -EINVAL; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u16 opaque_fid; if (IS_VF(cdev)) { qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); continue; } opaque_fid = p_hwfn->hw_info.opaque_fid; rc = qed_sp_eth_filter_mcast(p_hwfn, opaque_fid, p_filter_cmd, comp_mode, p_comp_data); } return rc; } static int qed_filter_ucast_cmd(struct qed_dev *cdev, struct qed_filter_ucast *p_filter_cmd, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { int rc = 0; int i; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u16 opaque_fid; if (IS_VF(cdev)) { rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); continue; } opaque_fid = p_hwfn->hw_info.opaque_fid; rc = qed_sp_eth_filter_ucast(p_hwfn, opaque_fid, p_filter_cmd, comp_mode, p_comp_data); if (rc) break; } return rc; } /* Statistics related code */ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, u32 *p_addr, u32 *p_len, u16 statistics_bin) { if (IS_PF(p_hwfn->cdev)) { *p_addr = BAR0_MAP_REG_PSDM_RAM + PSTORM_QUEUE_STAT_OFFSET(statistics_bin); *p_len = sizeof(struct eth_pstorm_per_queue_stat); } else { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; *p_addr = p_resp->pfdev_info.stats_info.pstats.address; *p_len = p_resp->pfdev_info.stats_info.pstats.len; } } static noinline_for_stack void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_pstorm_per_queue_stat pstats; u32 pstats_addr = 0, pstats_len = 0; __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, statistics_bin); memset(&pstats, 0, sizeof(pstats)); qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); p_stats->common.tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); p_stats->common.tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); p_stats->common.tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); 
p_stats->common.tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); p_stats->common.tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); p_stats->common.tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); p_stats->common.tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts); } static noinline_for_stack void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_eth_stats *p_stats, u16 statistics_bin) { struct tstorm_per_port_stat tstats; u32 tstats_addr, tstats_len; if (IS_PF(p_hwfn->cdev)) { tstats_addr = BAR0_MAP_REG_TSDM_RAM + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); tstats_len = sizeof(struct tstorm_per_port_stat); } else { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; tstats_len = p_resp->pfdev_info.stats_info.tstats.len; } memset(&tstats, 0, sizeof(tstats)); qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); p_stats->common.mftag_filter_discards += HILO_64_REGPAIR(tstats.mftag_filter_discard); p_stats->common.mac_filter_discards += HILO_64_REGPAIR(tstats.eth_mac_filter_discard); p_stats->common.gft_filter_drop += HILO_64_REGPAIR(tstats.eth_gft_drop_pkt); } static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, u32 *p_addr, u32 *p_len, u16 statistics_bin) { if (IS_PF(p_hwfn->cdev)) { *p_addr = BAR0_MAP_REG_USDM_RAM + USTORM_QUEUE_STAT_OFFSET(statistics_bin); *p_len = sizeof(struct eth_ustorm_per_queue_stat); } else { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; *p_addr = p_resp->pfdev_info.stats_info.ustats.address; *p_len = p_resp->pfdev_info.stats_info.ustats.len; } } static noinline_for_stack void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_ustorm_per_queue_stat ustats; u32 ustats_addr = 0, ustats_len = 0; __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, statistics_bin); memset(&ustats, 0, sizeof(ustats)); qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); p_stats->common.rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); p_stats->common.rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); p_stats->common.rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); } static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, u32 *p_addr, u32 *p_len, u16 statistics_bin) { if (IS_PF(p_hwfn->cdev)) { *p_addr = BAR0_MAP_REG_MSDM_RAM + MSTORM_QUEUE_STAT_OFFSET(statistics_bin); *p_len = sizeof(struct eth_mstorm_per_queue_stat); } else { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; *p_addr = p_resp->pfdev_info.stats_info.mstats.address; *p_len = p_resp->pfdev_info.stats_info.mstats.len; } } static noinline_for_stack void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_eth_stats *p_stats, u16 statistics_bin) { struct eth_mstorm_per_queue_stat mstats; u32 mstats_addr = 0, mstats_len = 0; __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, statistics_bin); memset(&mstats, 0, sizeof(mstats)); qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); 
p_stats->common.no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard); p_stats->common.packet_too_big_discard += HILO_64_REGPAIR(mstats.packet_too_big_discard); p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); p_stats->common.tpa_coalesced_pkts += HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); p_stats->common.tpa_coalesced_events += HILO_64_REGPAIR(mstats.tpa_coalesced_events); p_stats->common.tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num); p_stats->common.tpa_coalesced_bytes += HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); } static noinline_for_stack void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_eth_stats *p_stats) { struct qed_eth_stats_common *p_common = &p_stats->common; struct port_stats port_stats; int j; memset(&port_stats, 0, sizeof(port_stats)); qed_memcpy_from(p_hwfn, p_ptt, &port_stats, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, stats), sizeof(port_stats)); p_common->rx_64_byte_packets += port_stats.eth.r64; p_common->rx_65_to_127_byte_packets += port_stats.eth.r127; p_common->rx_128_to_255_byte_packets += port_stats.eth.r255; p_common->rx_256_to_511_byte_packets += port_stats.eth.r511; p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023; p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; p_common->rx_crc_errors += port_stats.eth.rfcs; p_common->rx_mac_crtl_frames += port_stats.eth.rxcf; p_common->rx_pause_frames += port_stats.eth.rxpf; p_common->rx_pfc_frames += port_stats.eth.rxpp; p_common->rx_align_errors += port_stats.eth.raln; p_common->rx_carrier_errors += port_stats.eth.rfcr; p_common->rx_oversize_packets += port_stats.eth.rovr; p_common->rx_jabbers += port_stats.eth.rjbr; p_common->rx_undersize_packets += port_stats.eth.rund; p_common->rx_fragments += port_stats.eth.rfrg; p_common->tx_64_byte_packets += port_stats.eth.t64; p_common->tx_65_to_127_byte_packets += port_stats.eth.t127; p_common->tx_128_to_255_byte_packets += port_stats.eth.t255; p_common->tx_256_to_511_byte_packets += port_stats.eth.t511; p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023; p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; p_common->tx_pause_frames += port_stats.eth.txpf; p_common->tx_pfc_frames += port_stats.eth.txpp; p_common->rx_mac_bytes += port_stats.eth.rbyte; p_common->rx_mac_uc_packets += port_stats.eth.rxuca; p_common->rx_mac_mc_packets += port_stats.eth.rxmca; p_common->rx_mac_bc_packets += port_stats.eth.rxbca; p_common->rx_mac_frames_ok += port_stats.eth.rxpok; p_common->tx_mac_bytes += port_stats.eth.tbyte; p_common->tx_mac_uc_packets += port_stats.eth.txuca; p_common->tx_mac_mc_packets += port_stats.eth.txmca; p_common->tx_mac_bc_packets += port_stats.eth.txbca; p_common->tx_mac_ctrl_frames += port_stats.eth.txcf; for (j = 0; j < 8; j++) { p_common->brb_truncates += port_stats.brb.brb_truncate[j]; p_common->brb_discards += port_stats.brb.brb_discard[j]; } if (QED_IS_BB(p_hwfn->cdev)) { struct qed_eth_stats_bb *p_bb = &p_stats->bb; p_bb->rx_1519_to_1522_byte_packets += port_stats.eth.u0.bb0.r1522; p_bb->rx_1519_to_2047_byte_packets += port_stats.eth.u0.bb0.r2047; p_bb->rx_2048_to_4095_byte_packets += port_stats.eth.u0.bb0.r4095; p_bb->rx_4096_to_9216_byte_packets += port_stats.eth.u0.bb0.r9216; p_bb->rx_9217_to_16383_byte_packets += port_stats.eth.u0.bb0.r16383; p_bb->tx_1519_to_2047_byte_packets += port_stats.eth.u1.bb1.t2047; p_bb->tx_2048_to_4095_byte_packets += port_stats.eth.u1.bb1.t4095; p_bb->tx_4096_to_9216_byte_packets += 
port_stats.eth.u1.bb1.t9216; p_bb->tx_9217_to_16383_byte_packets += port_stats.eth.u1.bb1.t16383; p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec; p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl; } else { struct qed_eth_stats_ah *p_ah = &p_stats->ah; p_ah->rx_1519_to_max_byte_packets += port_stats.eth.u0.ah0.r1519_to_max; p_ah->tx_1519_to_max_byte_packets = port_stats.eth.u1.ah1.t1519_to_max; } p_common->link_change_count = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, link_change_count)); } static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_eth_stats *stats, u16 statistics_bin, bool b_get_port_stats) { __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); if (b_get_port_stats && p_hwfn->mcp_info) __qed_get_vport_port_stats(p_hwfn, p_ptt, stats); } static void _qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats, bool is_atomic) { u8 fw_vport = 0; int i; memset(stats, 0, sizeof(*stats)); for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_ptt *p_ptt; bool b_get_port_stats; p_ptt = IS_PF(cdev) ? qed_ptt_acquire_context(p_hwfn, is_atomic) : NULL; if (IS_PF(cdev)) { /* The main vport index is relative first */ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { DP_ERR(p_hwfn, "No vport available!\n"); goto out; } } if (IS_PF(cdev) && !p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); continue; } b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn); __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, b_get_port_stats); out: if (IS_PF(cdev) && p_ptt) qed_ptt_release(p_hwfn, p_ptt); } } void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) { qed_get_vport_stats_context(cdev, stats, false); } void qed_get_vport_stats_context(struct qed_dev *cdev, struct qed_eth_stats *stats, bool is_atomic) { u32 i; if (!cdev || cdev->recov_in_prog) { memset(stats, 0, sizeof(*stats)); return; } _qed_get_vport_stats(cdev, stats, is_atomic); if (!cdev->reset_stats) return; /* Reduce the statistics baseline */ for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; } /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ void qed_reset_vport_stats(struct qed_dev *cdev) { int i; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct eth_mstorm_per_queue_stat mstats; struct eth_ustorm_per_queue_stat ustats; struct eth_pstorm_per_queue_stat pstats; struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) : NULL; u32 addr = 0, len = 0; if (IS_PF(cdev) && !p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); continue; } memset(&mstats, 0, sizeof(mstats)); __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); memset(&ustats, 0, sizeof(ustats)); __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); memset(&pstats, 0, sizeof(pstats)); __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); if (IS_PF(cdev)) qed_ptt_release(p_hwfn, p_ptt); } /* PORT statistics are not necessarily reset, so we need to * read and create a baseline for future statistics. * Link change stat is maintained by MFW, return its value as is. 
*/ if (!cdev->reset_stats) { DP_INFO(cdev, "Reset stats not allocated\n"); } else { _qed_get_vport_stats(cdev, cdev->reset_stats, false); cdev->reset_stats->common.link_change_count = 0; } } static enum gft_profile_type qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode) { if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE) return GFT_PROFILE_TYPE_4_TUPLE; if (mode == QED_FILTER_CONFIG_MODE_IP_DEST) return GFT_PROFILE_TYPE_IP_DST_ADDR; if (mode == QED_FILTER_CONFIG_MODE_IP_SRC) return GFT_PROFILE_TYPE_IP_SRC_ADDR; return GFT_PROFILE_TYPE_L4_DST_PORT; } void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_arfs_config_params *p_cfg_params) { if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits)) return; if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) { qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_cfg_params->tcp, p_cfg_params->udp, p_cfg_params->ipv4, p_cfg_params->ipv6, qed_arfs_mode_to_hsi(p_cfg_params->mode)); DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n", p_cfg_params->tcp ? "Enable" : "Disable", p_cfg_params->udp ? "Enable" : "Disable", p_cfg_params->ipv4 ? "Enable" : "Disable", p_cfg_params->ipv6 ? "Enable" : "Disable", (u32)p_cfg_params->mode); } else { DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n"); qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); } } int qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_cb, struct qed_ntuple_filter_params *p_params) { struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u16 abs_rx_q_id = 0; u8 abs_vport_id = 0; int rc = -EINVAL; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; if (p_cb) { init_data.comp_mode = QED_SPQ_MODE_CB; init_data.p_comp_data = p_cb; } else { init_data.comp_mode = QED_SPQ_MODE_EBLOCK; } rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_RX_UPDATE_GFT_FILTER, PROTOCOLID_ETH, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.rx_update_gft; DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length); if (p_params->b_is_drop) { p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT); } else { rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc) goto err; if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); if (rc) goto err; p_ramrod->rx_qid_valid = 1; p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); } p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); } p_ramrod->flow_id_valid = 0; p_ramrod->flow_id = 0; p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER; DP_VERBOSE(p_hwfn, QED_MSG_SP, "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", abs_vport_id, abs_rx_q_id, p_params->b_is_add ? 
"Adding" : "Removing", (u64)p_params->addr, p_params->length); return qed_spq_post(p_hwfn, p_ent, NULL); err: qed_sp_destroy_request(p_hwfn, p_ent); return rc; } int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_queue_cid *p_cid, u16 *p_rx_coal) { u32 coalesce, address, is_valid; struct cau_sb_entry sb_entry; u8 timer_res; int rc; rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + p_cid->sb_igu_id * sizeof(u64), (u64)(uintptr_t)&sb_entry, 2, NULL); if (rc) { DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); return rc; } timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), CAU_SB_ENTRY_TIMER_RES0); address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); coalesce = qed_rd(p_hwfn, p_ptt, address); is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); if (!is_valid) return -EINVAL; coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); *p_rx_coal = (u16)(coalesce << timer_res); return 0; } int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_queue_cid *p_cid, u16 *p_tx_coal) { u32 coalesce, address, is_valid; struct cau_sb_entry sb_entry; u8 timer_res; int rc; rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + p_cid->sb_igu_id * sizeof(u64), (u64)(uintptr_t)&sb_entry, 2, NULL); if (rc) { DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); return rc; } timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), CAU_SB_ENTRY_TIMER_RES1); address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); coalesce = qed_rd(p_hwfn, p_ptt, address); is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); if (!is_valid) return -EINVAL; coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); *p_tx_coal = (u16)(coalesce << timer_res); return 0; } int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle) { struct qed_queue_cid *p_cid = handle; struct qed_ptt *p_ptt; int rc = 0; if (IS_VF(p_hwfn->cdev)) { rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); if (rc) DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); return rc; } p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EAGAIN; if (p_cid->b_is_rx) { rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); if (rc) goto out; } else { rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); if (rc) goto out; } out: qed_ptt_release(p_hwfn, p_ptt); return rc; } static int qed_fill_eth_dev_info(struct qed_dev *cdev, struct qed_dev_eth_info *info) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); int i; memset(info, 0, sizeof(*info)); if (IS_PF(cdev)) { int max_vf_vlan_filters = 0; int max_vf_mac_filters = 0; info->num_tc = p_hwfn->hw_info.num_hw_tc; if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { u16 num_queues = 0; /* Since the feature controls only queue-zones, * make sure we have the contexts [rx, xdp, tcs] to * match. */ for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; u16 l2_queues = (u16)FEAT_NUM(hwfn, QED_PF_L2_QUE); u16 cids; cids = hwfn->pf_params.eth_pf_params.num_cons; cids /= (2 + info->num_tc); num_queues += min_t(u16, l2_queues, cids); } /* queues might theoretically be >256, but interrupts' * upper-limit guarantes that it would fit in a u8. 
*/ if (cdev->int_params.fp_msix_cnt) { u8 irqs = cdev->int_params.fp_msix_cnt; info->num_queues = (u8)min_t(u16, num_queues, irqs); } } else { info->num_queues = cdev->num_hwfns; } if (IS_QED_SRIOV(cdev)) { max_vf_vlan_filters = cdev->p_iov_info->total_vfs * QED_ETH_VF_NUM_VLAN_FILTERS; max_vf_mac_filters = cdev->p_iov_info->total_vfs * QED_ETH_VF_NUM_MAC_FILTERS; } info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev), QED_VLAN) - max_vf_vlan_filters; info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev), QED_MAC) - max_vf_mac_filters; ether_addr_copy(info->port_mac, cdev->hwfns[0].hw_info.hw_mac_addr); info->xdp_supported = true; } else { u16 total_cids = 0; info->num_tc = 1; /* Determine queues & XDP support */ for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u8 queues, cids; qed_vf_get_num_cids(p_hwfn, &cids); qed_vf_get_num_rxqs(p_hwfn, &queues); info->num_queues += queues; total_cids += cids; } /* Enable VF XDP in case PF guarntees sufficient connections */ if (total_cids >= info->num_queues * 3) info->xdp_supported = true; qed_vf_get_num_vlan_filters(&cdev->hwfns[0], (u8 *)&info->num_vlan_filters); qed_vf_get_num_mac_filters(&cdev->hwfns[0], (u8 *)&info->num_mac_filters); qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi; } qed_fill_dev_info(cdev, &info->common); if (IS_VF(cdev)) eth_zero_addr(info->common.hw_mac); return 0; } static void qed_register_eth_ops(struct qed_dev *cdev, struct qed_eth_cb_ops *ops, void *cookie) { cdev->protocol_ops.eth = ops; cdev->ops_cookie = cookie; /* For VF, we start bulletin reading */ if (IS_VF(cdev)) qed_vf_start_iov_wq(cdev); } static bool qed_check_mac(struct qed_dev *cdev, u8 *mac) { if (IS_PF(cdev)) return true; return qed_vf_check_mac(&cdev->hwfns[0], mac); } static int qed_start_vport(struct qed_dev *cdev, struct qed_start_vport_params *params) { int rc, i; for_each_hwfn(cdev, i) { struct qed_sp_vport_start_params start = { 0 }; struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; start.tpa_mode = params->gro_enable ? 
QED_TPA_MODE_GRO : QED_TPA_MODE_NONE; start.remove_inner_vlan = params->remove_inner_vlan; start.only_untagged = true; /* untagged only */ start.drop_ttl0 = params->drop_ttl0; start.opaque_fid = p_hwfn->hw_info.opaque_fid; start.concrete_fid = p_hwfn->hw_info.concrete_fid; start.handle_ptp_pkts = params->handle_ptp_pkts; start.vport_id = params->vport_id; start.max_buffers_per_cqe = 16; start.mtu = params->mtu; rc = qed_sp_vport_start(p_hwfn, &start); if (rc) { DP_ERR(cdev, "Failed to start VPORT\n"); return rc; } rc = qed_hw_start_fastpath(p_hwfn); if (rc) { DP_ERR(cdev, "Failed to start VPORT fastpath\n"); return rc; } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), "Started V-PORT %d with MTU %d\n", start.vport_id, start.mtu); } if (params->clear_stats) qed_reset_vport_stats(cdev); return 0; } static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id) { int rc, i; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; rc = qed_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, vport_id); if (rc) { DP_ERR(cdev, "Failed to stop VPORT\n"); return rc; } } return 0; } static int qed_update_vport_rss(struct qed_dev *cdev, struct qed_update_vport_rss_params *input, struct qed_rss_params *rss) { int i, fn; /* Update configuration with what's correct regardless of CMT */ rss->update_rss_config = 1; rss->rss_enable = 1; rss->update_rss_capabilities = 1; rss->update_rss_ind_table = 1; rss->update_rss_key = 1; rss->rss_caps = input->rss_caps; memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32)); /* In regular scenario, we'd simply need to take input handlers. * But in CMT, we'd have to split the handlers according to the * engine they were configured on. We'd then have to understand * whether RSS is really required, since 2-queues on CMT doesn't * require RSS. 
*/ if (cdev->num_hwfns == 1) { memcpy(rss->rss_ind_table, input->rss_ind_table, QED_RSS_IND_TABLE_SIZE * sizeof(void *)); rss->rss_table_size_log = 7; return 0; } /* Start by copying the non-spcific information to the 2nd copy */ memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params)); /* CMT should be round-robin */ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { struct qed_queue_cid *cid = input->rss_ind_table[i]; struct qed_rss_params *t_rss; if (cid->p_owner == QED_LEADING_HWFN(cdev)) t_rss = &rss[0]; else t_rss = &rss[1]; t_rss->rss_ind_table[i / cdev->num_hwfns] = cid; } /* Make sure RSS is actually required */ for_each_hwfn(cdev, fn) { for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) { if (rss[fn].rss_ind_table[i] != rss[fn].rss_ind_table[0]) break; } if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) { DP_VERBOSE(cdev, NETIF_MSG_IFUP, "CMT - 1 queue per-hwfn; Disabling RSS\n"); return -EINVAL; } rss[fn].rss_table_size_log = 6; } return 0; } static int qed_update_vport(struct qed_dev *cdev, struct qed_update_vport_params *params) { struct qed_sp_vport_update_params sp_params; struct qed_rss_params *rss; int rc = 0, i; if (!cdev) return -ENODEV; rss = vzalloc(array_size(sizeof(*rss), cdev->num_hwfns)); if (!rss) return -ENOMEM; memset(&sp_params, 0, sizeof(sp_params)); /* Translate protocol params into sp params */ sp_params.vport_id = params->vport_id; sp_params.update_vport_active_rx_flg = params->update_vport_active_flg; sp_params.update_vport_active_tx_flg = params->update_vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg; sp_params.update_tx_switching_flg = params->update_tx_switching_flg; sp_params.tx_switching_flg = params->tx_switching_flg; sp_params.accept_any_vlan = params->accept_any_vlan; sp_params.update_accept_any_vlan_flg = params->update_accept_any_vlan_flg; /* Prepare the RSS configuration */ if (params->update_rss_flg) if (qed_update_vport_rss(cdev, &params->rss_params, rss)) params->update_rss_flg = 0; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; if (params->update_rss_flg) sp_params.rss_params = &rss[i]; sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; rc = qed_sp_vport_update(p_hwfn, &sp_params, QED_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_ERR(cdev, "Failed to update VPORT\n"); goto out; } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), "Updated V-PORT %d: active_flag %d [update %d]\n", params->vport_id, params->vport_active_flg, params->update_vport_active_flg); } out: vfree(rss); return rc; } static int qed_start_rxq(struct qed_dev *cdev, u8 rss_num, struct qed_queue_start_common_params *p_params, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, struct qed_rxq_start_ret_params *ret_params) { struct qed_hwfn *p_hwfn; int rc, hwfn_index; hwfn_index = rss_num % cdev->num_hwfns; p_hwfn = &cdev->hwfns[hwfn_index]; p_params->queue_id = p_params->queue_id / cdev->num_hwfns; p_params->stats_id = p_params->vport_id; rc = qed_eth_rx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid, p_params, bd_max_bytes, bd_chain_phys_addr, cqe_pbl_addr, cqe_pbl_size, ret_params); if (rc) { DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id); return rc; } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", p_params->queue_id, rss_num, p_params->vport_id, p_params->p_sb->igu_sb_id); return 0; } static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle) { 
int rc, hwfn_index; struct qed_hwfn *p_hwfn; hwfn_index = rss_id % cdev->num_hwfns; p_hwfn = &cdev->hwfns[hwfn_index]; rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false); if (rc) { DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id); return rc; } return 0; } static int qed_start_txq(struct qed_dev *cdev, u8 rss_num, struct qed_queue_start_common_params *p_params, dma_addr_t pbl_addr, u16 pbl_size, struct qed_txq_start_ret_params *ret_params) { struct qed_hwfn *p_hwfn; int rc, hwfn_index; hwfn_index = rss_num % cdev->num_hwfns; p_hwfn = &cdev->hwfns[hwfn_index]; p_params->queue_id = p_params->queue_id / cdev->num_hwfns; p_params->stats_id = p_params->vport_id; rc = qed_eth_tx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid, p_params, p_params->tc, pbl_addr, pbl_size, ret_params); if (rc) { DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id); return rc; } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", p_params->queue_id, rss_num, p_params->vport_id, p_params->p_sb->igu_sb_id); return 0; } #define QED_HW_STOP_RETRY_LIMIT (10) static int qed_fastpath_stop(struct qed_dev *cdev) { int rc; rc = qed_hw_stop_fastpath(cdev); if (rc) { DP_ERR(cdev, "Failed to stop Fastpath\n"); return rc; } return 0; } static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle) { struct qed_hwfn *p_hwfn; int rc, hwfn_index; hwfn_index = rss_id % cdev->num_hwfns; p_hwfn = &cdev->hwfns[hwfn_index]; rc = qed_eth_tx_queue_stop(p_hwfn, handle); if (rc) { DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id); return rc; } return 0; } static int qed_tunn_configure(struct qed_dev *cdev, struct qed_tunn_params *tunn_params) { struct qed_tunnel_info tunn_info; int i, rc; memset(&tunn_info, 0, sizeof(tunn_info)); if (tunn_params->update_vxlan_port) { tunn_info.vxlan_port.b_update_port = true; tunn_info.vxlan_port.port = tunn_params->vxlan_port; } if (tunn_params->update_geneve_port) { tunn_info.geneve_port.b_update_port = true; tunn_info.geneve_port.port = tunn_params->geneve_port; } for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; struct qed_ptt *p_ptt; struct qed_tunnel_info *tun; tun = &hwfn->cdev->tunnel; if (IS_PF(cdev)) { p_ptt = qed_ptt_acquire(hwfn); if (!p_ptt) return -EAGAIN; } else { p_ptt = NULL; } rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info, QED_SPQ_MODE_EBLOCK, NULL); if (rc) { if (IS_PF(cdev)) qed_ptt_release(hwfn, p_ptt); return rc; } if (IS_PF_SRIOV(hwfn)) { u16 vxlan_port, geneve_port; int j; vxlan_port = tun->vxlan_port.port; geneve_port = tun->geneve_port.port; qed_for_each_vf(hwfn, j) { qed_iov_bulletin_set_udp_ports(hwfn, j, vxlan_port, geneve_port); } qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); } if (IS_PF(cdev)) qed_ptt_release(hwfn, p_ptt); } return 0; } static int qed_configure_filter_rx_mode(struct qed_dev *cdev, enum qed_filter_rx_mode_type type) { struct qed_filter_accept_flags accept_flags; memset(&accept_flags, 0, sizeof(accept_flags)); accept_flags.update_rx_mode_config = 1; accept_flags.update_tx_mode_config = 1; accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_BCAST; accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_MCAST_MATCHED | QED_ACCEPT_BCAST; if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; } else if (type == 
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; } return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, QED_SPQ_MODE_CB, NULL); } static int qed_configure_filter_ucast(struct qed_dev *cdev, struct qed_filter_ucast_params *params) { struct qed_filter_ucast ucast; if (!params->vlan_valid && !params->mac_valid) { DP_NOTICE(cdev, "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); return -EINVAL; } memset(&ucast, 0, sizeof(ucast)); switch (params->type) { case QED_FILTER_XCAST_TYPE_ADD: ucast.opcode = QED_FILTER_ADD; break; case QED_FILTER_XCAST_TYPE_DEL: ucast.opcode = QED_FILTER_REMOVE; break; case QED_FILTER_XCAST_TYPE_REPLACE: ucast.opcode = QED_FILTER_REPLACE; break; default: DP_NOTICE(cdev, "Unknown unicast filter type %d\n", params->type); } if (params->vlan_valid && params->mac_valid) { ucast.type = QED_FILTER_MAC_VLAN; ether_addr_copy(ucast.mac, params->mac); ucast.vlan = params->vlan; } else if (params->mac_valid) { ucast.type = QED_FILTER_MAC; ether_addr_copy(ucast.mac, params->mac); } else { ucast.type = QED_FILTER_VLAN; ucast.vlan = params->vlan; } ucast.is_rx_filter = true; ucast.is_tx_filter = true; return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL); } static int qed_configure_filter_mcast(struct qed_dev *cdev, struct qed_filter_mcast_params *params) { struct qed_filter_mcast mcast; int i; memset(&mcast, 0, sizeof(mcast)); switch (params->type) { case QED_FILTER_XCAST_TYPE_ADD: mcast.opcode = QED_FILTER_ADD; break; case QED_FILTER_XCAST_TYPE_DEL: mcast.opcode = QED_FILTER_REMOVE; break; default: DP_NOTICE(cdev, "Unknown multicast filter type %d\n", params->type); } mcast.num_mc_addrs = params->num; for (i = 0; i < mcast.num_mc_addrs; i++) ether_addr_copy(mcast.mac[i], params->mac[i]); return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); } static int qed_configure_arfs_searcher(struct qed_dev *cdev, enum qed_filter_config_mode mode) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_arfs_config_params arfs_config_params; memset(&arfs_config_params, 0, sizeof(arfs_config_params)); arfs_config_params.tcp = true; arfs_config_params.udp = true; arfs_config_params.ipv4 = true; arfs_config_params.ipv6 = true; arfs_config_params.mode = mode; qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, &arfs_config_params); return 0; } static void qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, void *cookie, union event_ring_data *data, u8 fw_return_code) { struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common; void *dev = p_hwfn->cdev->ops_cookie; op->arfs_filter_op(dev, cookie, fw_return_code); } static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, struct qed_ntuple_filter_params *params) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_spq_comp_cb cb; int rc = -EINVAL; cb.function = qed_arfs_sp_response_handler; cb.cookie = cookie; if (params->b_is_vf) { if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false, false)) { DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n", params->vf_id); return rc; } params->vport_id = params->vf_id + 1; params->qid = QED_RFS_NTUPLE_QID_RSS; } rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params); if (rc) DP_NOTICE(p_hwfn, "Failed to issue a-RFS filter configuration\n"); else DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "Successfully issued a-RFS filter configuration\n"); return rc; } static int qed_get_coalesce(struct qed_dev 
*cdev, u16 *coal, void *handle) { struct qed_queue_cid *p_cid = handle; struct qed_hwfn *p_hwfn; int rc; p_hwfn = p_cid->p_owner; rc = qed_get_queue_coalesce(p_hwfn, coal, handle); if (rc) DP_VERBOSE(cdev, QED_MSG_DEBUG, "Unable to read queue coalescing\n"); return rc; } static int qed_fp_cqe_completion(struct qed_dev *dev, u8 rss_id, struct eth_slow_path_rx_cqe *cqe) { return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns], cqe); } static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac) { int i, ret; if (IS_PF(cdev)) return 0; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac); if (ret) return ret; } return 0; } static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, #ifdef CONFIG_QED_SRIOV .iov = &qed_iov_ops_pass, #endif #ifdef CONFIG_DCB .dcb = &qed_dcbnl_ops_pass, #endif .ptp = &qed_ptp_ops_pass, .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, .check_mac = &qed_check_mac, .vport_start = &qed_start_vport, .vport_stop = &qed_stop_vport, .vport_update = &qed_update_vport, .q_rx_start = &qed_start_rxq, .q_rx_stop = &qed_stop_rxq, .q_tx_start = &qed_start_txq, .q_tx_stop = &qed_stop_txq, .filter_config_rx_mode = &qed_configure_filter_rx_mode, .filter_config_ucast = &qed_configure_filter_ucast, .filter_config_mcast = &qed_configure_filter_mcast, .fastpath_stop = &qed_fastpath_stop, .eth_cqe_completion = &qed_fp_cqe_completion, .get_vport_stats = &qed_get_vport_stats, .tunn_config = &qed_tunn_configure, .ntuple_filter_config = &qed_ntuple_arfs_filter_config, .configure_arfs_searcher = &qed_configure_arfs_searcher, .get_coalesce = &qed_get_coalesce, .req_bulletin_update_mac = &qed_req_bulletin_update_mac, }; const struct qed_eth_ops *qed_get_eth_ops(void) { return &qed_eth_ops_pass; } EXPORT_SYMBOL(qed_get_eth_ops); void qed_put_eth_ops(void) { /* TODO - reference count for module? */ } EXPORT_SYMBOL(qed_put_eth_ops);
linux-master
drivers/net/ethernet/qlogic/qed/qed_l2.c
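Editor's sketch (not part of the dataset row above): the approximate-multicast filter built in qed_sp_eth_filter_mcast() maps each MAC address to one of 256 bins through the bit-serial CRC in qed_calc_crc32c(). The following minimal, self-contained userspace rendering shows that mapping; SKETCH_CRC32_POLY and SKETCH_MCAST_SEED are assumptions (the driver's CRC32_POLY and ETH_MULTICAST_BIN_FROM_MAC_SEED are defined outside this excerpt), and the multicast MAC used is hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_CRC32_POLY  0x1edc6f41u  /* assumed value of CRC32_POLY */
#define SKETCH_MCAST_SEED  0xb169u      /* placeholder seed for illustration */

/* Same bit-serial loop as qed_calc_crc32c(); length must be a multiple of 8. */
static uint32_t sketch_crc32c(const uint8_t *buf, uint32_t len, uint32_t seed)
{
	uint32_t crc = seed;

	for (uint32_t byte = 0; byte < len; byte++) {
		uint8_t cur = buf[byte];

		for (uint32_t bit = 0; bit < 8; bit++) {
			uint8_t msb = (uint8_t)(crc >> 31);

			crc <<= 1;
			if (msb != (0x1 & (cur >> bit)))
				crc = (crc ^ SKETCH_CRC32_POLY) | 1;
		}
	}
	return crc;
}

int main(void)
{
	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb }; /* hypothetical */
	uint8_t buf[8] = { 0 };
	uint32_t bins[8] = { 0 };    /* 256 bins, one bit each, as in approx_mcast.bins */
	uint8_t bin;

	memcpy(buf, mac, 6);         /* qed_crc32c_le() pads the 6-byte MAC to 8 bytes */
	bin = (uint8_t)(sketch_crc32c(buf, 8, SKETCH_MCAST_SEED) & 0xff);
	bins[bin / 32] |= 1u << (bin % 32);
	printf("MAC maps to approx-mcast bin %u\n", bin);
	return 0;
}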
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include "qed.h" #include <linux/qed/qed_chain.h> #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { /* qed_spq_get_entry() can either get an entry from the free_pool, * or, if no entries are left, allocate a new entry and add it to * the unlimited_pending list. */ if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending) kfree(p_ent); else qed_spq_return_entry(p_hwfn, p_ent); } int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) { u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid; struct qed_spq_entry *p_ent = NULL; int rc; if (!pp_ent) return -ENOMEM; rc = qed_spq_get_entry(p_hwfn, pp_ent); if (rc) return rc; p_ent = *pp_ent; p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid); p_ent->elem.hdr.cmd_id = cmd; p_ent->elem.hdr.protocol_id = protocol; p_ent->priority = QED_SPQ_PRIORITY_NORMAL; p_ent->comp_mode = p_data->comp_mode; p_ent->comp_done.done = 0; switch (p_ent->comp_mode) { case QED_SPQ_MODE_EBLOCK: p_ent->comp_cb.cookie = &p_ent->comp_done; break; case QED_SPQ_MODE_BLOCK: if (!p_data->p_comp_data) goto err; p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; break; case QED_SPQ_MODE_CB: if (!p_data->p_comp_data) p_ent->comp_cb.function = NULL; else p_ent->comp_cb = *p_data->p_comp_data; break; default: DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", p_ent->comp_mode); goto err; } DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n", opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd), cmd, qed_get_protocol_type_str(protocol), protocol, (unsigned long long)(uintptr_t)&p_ent->ramrod, D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", "MODE_CB")); memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); return 0; err: qed_sp_destroy_request(p_hwfn, p_ent); return -EINVAL; } static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) { switch (type) { case QED_TUNN_CLSS_MAC_VLAN: return TUNNEL_CLSS_MAC_VLAN; case QED_TUNN_CLSS_MAC_VNI: return TUNNEL_CLSS_MAC_VNI; case QED_TUNN_CLSS_INNER_MAC_VLAN: return TUNNEL_CLSS_INNER_MAC_VLAN; case QED_TUNN_CLSS_INNER_MAC_VNI: return TUNNEL_CLSS_INNER_MAC_VNI; case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE: return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE; default: return TUNNEL_CLSS_MAC_VLAN; } } static void qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun, struct qed_tunnel_info *p_src, bool b_pf_start) { if (p_src->vxlan.b_update_mode || b_pf_start) p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled; if (p_src->l2_gre.b_update_mode || b_pf_start) p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled; if (p_src->ip_gre.b_update_mode || b_pf_start) p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled; if (p_src->l2_geneve.b_update_mode || b_pf_start) p_tun->l2_geneve.b_mode_enabled = p_src->l2_geneve.b_mode_enabled; if (p_src->ip_geneve.b_update_mode || b_pf_start) p_tun->ip_geneve.b_mode_enabled = 
p_src->ip_geneve.b_mode_enabled; } static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun, struct qed_tunnel_info *p_src) { int type; p_tun->b_update_rx_cls = p_src->b_update_rx_cls; p_tun->b_update_tx_cls = p_src->b_update_tx_cls; type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls); p_tun->vxlan.tun_cls = type; type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls); p_tun->l2_gre.tun_cls = type; type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls); p_tun->ip_gre.tun_cls = type; type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls); p_tun->l2_geneve.tun_cls = type; type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls); p_tun->ip_geneve.tun_cls = type; } static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun, struct qed_tunnel_info *p_src) { p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port; p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port; if (p_src->geneve_port.b_update_port) p_tun->geneve_port.port = p_src->geneve_port.port; if (p_src->vxlan_port.b_update_port) p_tun->vxlan_port.port = p_src->vxlan_port.port; } static void __qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, struct qed_tunn_update_type *tun_type) { *p_tunn_cls = tun_type->tun_cls; } static void qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, struct qed_tunn_update_type *tun_type, u8 *p_update_port, __le16 *p_port, struct qed_tunn_update_udp_port *p_udp_port) { __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type); if (p_udp_port->b_update_port) { *p_update_port = 1; *p_port = cpu_to_le16(p_udp_port->port); } } static void qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn, struct qed_tunnel_info *p_src, struct pf_update_tunnel_config *p_tunn_cfg) { struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; qed_set_pf_update_tunn_mode(p_tun, p_src, false); qed_set_tunn_cls_info(p_tun, p_src); qed_set_tunn_ports(p_tun, p_src); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, &p_tun->vxlan, &p_tunn_cfg->set_vxlan_udp_port_flg, &p_tunn_cfg->vxlan_udp_port, &p_tun->vxlan_port); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, &p_tun->l2_geneve, &p_tunn_cfg->set_geneve_udp_port_flg, &p_tunn_cfg->geneve_udp_port, &p_tun->geneve_port); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, &p_tun->ip_geneve); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, &p_tun->l2_gre); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, &p_tun->ip_gre); p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls; } static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tun) { qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled); qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled); qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled, p_tun->ip_geneve.b_mode_enabled); } static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn) { if (p_tunn->vxlan_port.b_update_port) qed_set_vxlan_dest_port(p_hwfn, p_ptt, p_tunn->vxlan_port.port); if (p_tunn->geneve_port.b_update_port) qed_set_geneve_dest_port(p_hwfn, p_ptt, p_tunn->geneve_port.port); qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn); } static void qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, struct qed_tunnel_info *p_src, struct pf_start_tunnel_config *p_tunn_cfg) { struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; if (!p_src) return; qed_set_pf_update_tunn_mode(p_tun, p_src, true); 
qed_set_tunn_cls_info(p_tun, p_src); qed_set_tunn_ports(p_tun, p_src); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, &p_tun->vxlan, &p_tunn_cfg->set_vxlan_udp_port_flg, &p_tunn_cfg->vxlan_udp_port, &p_tun->vxlan_port); qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, &p_tun->l2_geneve, &p_tunn_cfg->set_geneve_udp_port_flg, &p_tunn_cfg->geneve_udp_port, &p_tun->geneve_port); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, &p_tun->ip_geneve); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, &p_tun->l2_gre); __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, &p_tun->ip_gre); } int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, bool allow_npar_tx_switch) { struct outer_tag_config_struct *outer_tag_config; struct pf_start_ramrod_data *p_ramrod = NULL; u16 sb = qed_int_get_sp_sb_id(p_hwfn); u8 sb_index = p_hwfn->p_eq->eq_sb_index; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u8 page_cnt, i; int rc; /* update initial eq producer */ qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(&p_hwfn->p_eq->chain)); memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_START, PROTOCOLID_COMMON, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.pf_start; p_ramrod->event_ring_sb_id = cpu_to_le16(sb); p_ramrod->event_ring_sb_index = sb_index; p_ramrod->path_id = QED_PATH_ID(p_hwfn); p_ramrod->dont_log_ramrods = 0; p_ramrod->log_type_mask = cpu_to_le16(0xf); if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) p_ramrod->mf_mode = MF_OVLAN; else p_ramrod->mf_mode = MF_NPAR; outer_tag_config = &p_ramrod->outer_tag_config; outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan); if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) { outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q); } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD); outer_tag_config->enable_stag_pri_change = 1; } outer_tag_config->pri_map_valid = 1; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) outer_tag_config->inner_to_outer_pri_map[i] = i; /* enable_stag_pri_change should be set if port is in BD mode or, * UFP with Host Control mode. 
*/ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) { if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) outer_tag_config->enable_stag_pri_change = 1; else outer_tag_config->enable_stag_pri_change = 0; outer_tag_config->outer_tag.tci |= cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); } /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain)); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); p_ramrod->event_ring_num_pages = page_cnt; /* Place consolidation queue address in ramrod */ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr, qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain)); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain); p_ramrod->consolid_q_num_pages = page_cnt; qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits)) p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; switch (p_hwfn->hw_info.personality) { case QED_PCI_ETH: p_ramrod->personality = PERSONALITY_ETH; break; case QED_PCI_FCOE: p_ramrod->personality = PERSONALITY_FCOE; break; case QED_PCI_ISCSI: case QED_PCI_NVMETCP: p_ramrod->personality = PERSONALITY_TCP_ULP; break; case QED_PCI_ETH_ROCE: case QED_PCI_ETH_IWARP: p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; break; default: DP_NOTICE(p_hwfn, "Unknown personality %d\n", p_hwfn->hw_info.personality); p_ramrod->personality = PERSONALITY_ETH; } if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf; p_ramrod->num_vfs = (u8)p_iov->total_vfs; } p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n", sb, sb_index, outer_tag_config->outer_tag.tci); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (p_tunn) qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel); return rc; } int qed_sp_pf_update(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_CB; rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, &init_data); if (rc) return rc; qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results, &p_ent->ramrod.pf_update); return qed_spq_post(p_hwfn, p_ent, NULL); } int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) { DP_INFO(p_hwfn, "Invalid priority type %d\n", p_hwfn->ufp_info.pri_type); return -EINVAL; } /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_CB; rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, &init_data); if (rc) return rc; p_ent->ramrod.pf_update.update_enable_stag_pri_change = true; if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) p_ent->ramrod.pf_update.enable_stag_pri_change = 1; else p_ent->ramrod.pf_update.enable_stag_pri_change = 0; return qed_spq_post(p_hwfn, p_ent, NULL); } /* Set pf update ramrod command params */ int 
qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; if (IS_VF(p_hwfn->cdev)) return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn); if (!p_tunn) return -EINVAL; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, &init_data); if (rc) return rc; qed_tunn_set_pf_update_params(p_hwfn, p_tunn, &p_ent->ramrod.pf_update.tunnel_config); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) return rc; qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel); return rc; } int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON, &init_data); if (rc) return rc; return qed_spq_post(p_hwfn, p_ent, NULL); } int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON, &init_data); if (rc) return rc; return qed_spq_post(p_hwfn, p_ent, NULL); } int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_CB; rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, &init_data); if (rc) return rc; p_ent->ramrod.pf_update.update_mf_vlan_flag = true; p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) p_ent->ramrod.pf_update.mf_vlan |= cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); return qed_spq_post(p_hwfn, p_ent, NULL); }
linux-master
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
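Editor's sketch (not part of the dataset rows): qed_sp_pf_start() and qed_sp_pf_update_stag() fold the UFP traffic class into the outer tag with "tci |= (u16)tc << 13", i.e. into the PCP bits of a standard 802.1Q TCI. A small standalone illustration of that bit layout follows, with hypothetical ovlan/tc values; the field split is plain IEEE 802.1Q and nothing here is taken from qed headers.

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI layout: PCP[15:13] | DEI[12] | VID[11:0] */
static uint16_t tci_pack(uint8_t pcp, uint8_t dei, uint16_t vid)
{
	return (uint16_t)(((pcp & 0x7) << 13) | ((dei & 0x1) << 12) | (vid & 0xfff));
}

int main(void)
{
	uint16_t ovlan = 100;   /* hypothetical hw_info.ovlan */
	uint8_t tc = 5;         /* hypothetical ufp_info.tc */
	uint16_t tci = ovlan;

	tci |= (uint16_t)(tc << 13);    /* the shift used in the ramrods above */
	printf("tci=0x%04x -> pcp=%u vid=%u\n", tci, tci >> 13, tci & 0xfff);
	printf("tci_pack() gives 0x%04x\n", tci_pack(tc, 0, ovlan));
	return 0;
}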
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* Copyright (c) 2020 Marvell International Ltd. */ #include <linux/dma-mapping.h> #include <linux/qed/qed_chain.h> #include <linux/vmalloc.h> #include "qed_dev_api.h" static void qed_chain_init(struct qed_chain *chain, const struct qed_chain_init_params *params, u32 page_cnt) { memset(chain, 0, sizeof(*chain)); chain->elem_size = params->elem_size; chain->intended_use = params->intended_use; chain->mode = params->mode; chain->cnt_type = params->cnt_type; chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size, params->page_size); chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size, params->page_size, params->mode); chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size, params->mode); chain->elem_per_page_mask = chain->elem_per_page - 1; chain->next_page_mask = chain->usable_per_page & chain->elem_per_page_mask; chain->page_size = params->page_size; chain->page_cnt = page_cnt; chain->capacity = chain->usable_per_page * page_cnt; chain->size = chain->elem_per_page * page_cnt; if (params->ext_pbl_virt) { chain->pbl_sp.table_virt = params->ext_pbl_virt; chain->pbl_sp.table_phys = params->ext_pbl_phys; chain->b_external_pbl = true; } } static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain, void *virt_curr, void *virt_next, dma_addr_t phys_next) { struct qed_chain_next *next; u32 size; size = chain->elem_size * chain->usable_per_page; next = virt_curr + size; DMA_REGPAIR_LE(next->next_phys, phys_next); next->next_virt = virt_next; } static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr, dma_addr_t phys_addr) { chain->p_virt_addr = virt_addr; chain->p_phys_addr = phys_addr; } static void qed_chain_free_next_ptr(struct qed_dev *cdev, struct qed_chain *chain) { struct device *dev = &cdev->pdev->dev; struct qed_chain_next *next; dma_addr_t phys, phys_next; void *virt, *virt_next; u32 size, i; size = chain->elem_size * chain->usable_per_page; virt = chain->p_virt_addr; phys = chain->p_phys_addr; for (i = 0; i < chain->page_cnt; i++) { if (!virt) break; next = virt + size; virt_next = next->next_virt; phys_next = HILO_DMA_REGPAIR(next->next_phys); dma_free_coherent(dev, chain->page_size, virt, phys); virt = virt_next; phys = phys_next; } } static void qed_chain_free_single(struct qed_dev *cdev, struct qed_chain *chain) { if (!chain->p_virt_addr) return; dma_free_coherent(&cdev->pdev->dev, chain->page_size, chain->p_virt_addr, chain->p_phys_addr); } static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain) { struct device *dev = &cdev->pdev->dev; struct addr_tbl_entry *entry; u32 i; if (!chain->pbl.pp_addr_tbl) return; for (i = 0; i < chain->page_cnt; i++) { entry = chain->pbl.pp_addr_tbl + i; if (!entry->virt_addr) break; dma_free_coherent(dev, chain->page_size, entry->virt_addr, entry->dma_map); } if (!chain->b_external_pbl) dma_free_coherent(dev, chain->pbl_sp.table_size, chain->pbl_sp.table_virt, chain->pbl_sp.table_phys); vfree(chain->pbl.pp_addr_tbl); chain->pbl.pp_addr_tbl = NULL; } /** * qed_chain_free() - Free chain DMA memory. * * @cdev: Main device structure. * @chain: Chain to free. 
*/ void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain) { switch (chain->mode) { case QED_CHAIN_MODE_NEXT_PTR: qed_chain_free_next_ptr(cdev, chain); break; case QED_CHAIN_MODE_SINGLE: qed_chain_free_single(cdev, chain); break; case QED_CHAIN_MODE_PBL: qed_chain_free_pbl(cdev, chain); break; default: return; } qed_chain_init_mem(chain, NULL, 0); } static int qed_chain_alloc_sanity_check(struct qed_dev *cdev, const struct qed_chain_init_params *params, u32 page_cnt) { u64 chain_size; chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size); chain_size *= page_cnt; if (!chain_size) return -EINVAL; /* The actual chain size can be larger than the maximal possible value * after rounding up the requested elements number to pages, and after * taking into account the unusuable elements (next-ptr elements). * The size of a "u16" chain can be (U16_MAX + 1) since the chain * size/capacity fields are of u32 type. */ switch (params->cnt_type) { case QED_CHAIN_CNT_TYPE_U16: if (chain_size > U16_MAX + 1) break; return 0; case QED_CHAIN_CNT_TYPE_U32: if (chain_size > U32_MAX) break; return 0; default: return -EINVAL; } DP_NOTICE(cdev, "The actual chain size (0x%llx) is larger than the maximal possible value\n", chain_size); return -EINVAL; } static int qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *chain) { struct device *dev = &cdev->pdev->dev; void *virt, *virt_prev = NULL; dma_addr_t phys; u32 i; for (i = 0; i < chain->page_cnt; i++) { virt = dma_alloc_coherent(dev, chain->page_size, &phys, GFP_KERNEL); if (!virt) return -ENOMEM; if (i == 0) { qed_chain_init_mem(chain, virt, phys); qed_chain_reset(chain); } else { qed_chain_init_next_ptr_elem(chain, virt_prev, virt, phys); } virt_prev = virt; } /* Last page's next element should point to the beginning of the * chain. 
*/ qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr, chain->p_phys_addr); return 0; } static int qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *chain) { dma_addr_t phys; void *virt; virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size, &phys, GFP_KERNEL); if (!virt) return -ENOMEM; qed_chain_init_mem(chain, virt, phys); qed_chain_reset(chain); return 0; } static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain) { struct device *dev = &cdev->pdev->dev; struct addr_tbl_entry *addr_tbl; dma_addr_t phys, pbl_phys; __le64 *pbl_virt; u32 page_cnt, i; size_t size; void *virt; page_cnt = chain->page_cnt; size = array_size(page_cnt, sizeof(*addr_tbl)); if (unlikely(size == SIZE_MAX)) return -EOVERFLOW; addr_tbl = vzalloc(size); if (!addr_tbl) return -ENOMEM; chain->pbl.pp_addr_tbl = addr_tbl; if (chain->b_external_pbl) { pbl_virt = chain->pbl_sp.table_virt; goto alloc_pages; } size = array_size(page_cnt, sizeof(*pbl_virt)); if (unlikely(size == SIZE_MAX)) return -EOVERFLOW; pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL); if (!pbl_virt) return -ENOMEM; chain->pbl_sp.table_virt = pbl_virt; chain->pbl_sp.table_phys = pbl_phys; chain->pbl_sp.table_size = size; alloc_pages: for (i = 0; i < page_cnt; i++) { virt = dma_alloc_coherent(dev, chain->page_size, &phys, GFP_KERNEL); if (!virt) return -ENOMEM; if (i == 0) { qed_chain_init_mem(chain, virt, phys); qed_chain_reset(chain); } /* Fill the PBL table with the physical address of the page */ pbl_virt[i] = cpu_to_le64(phys); /* Keep the virtual address of the page */ addr_tbl[i].virt_addr = virt; addr_tbl[i].dma_map = phys; } return 0; } /** * qed_chain_alloc() - Allocate and initialize a chain. * * @cdev: Main device structure. * @chain: Chain to be processed. * @params: Chain initialization parameters. * * Return: 0 on success, negative errno otherwise. */ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, struct qed_chain_init_params *params) { u32 page_cnt; int rc; if (!params->page_size) params->page_size = QED_CHAIN_PAGE_SIZE; if (params->mode == QED_CHAIN_MODE_SINGLE) page_cnt = 1; else page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems, params->elem_size, params->page_size, params->mode); rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt); if (rc) { DP_NOTICE(cdev, "Cannot allocate a chain with the given arguments:\n"); DP_NOTICE(cdev, "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n", params->intended_use, params->mode, params->cnt_type, params->num_elems, params->elem_size, params->page_size); return rc; } qed_chain_init(chain, params, page_cnt); switch (params->mode) { case QED_CHAIN_MODE_NEXT_PTR: rc = qed_chain_alloc_next_ptr(cdev, chain); break; case QED_CHAIN_MODE_SINGLE: rc = qed_chain_alloc_single(cdev, chain); break; case QED_CHAIN_MODE_PBL: rc = qed_chain_alloc_pbl(cdev, chain); break; default: return -EINVAL; } if (!rc) return 0; qed_chain_free(cdev, chain); return rc; }
linux-master
drivers/net/ethernet/qlogic/qed/qed_chain.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation * Copyright (c) 2019-2021 Marvell International Ltd. */ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/crc32.h> #include "qed.h" #include "qed_cxt.h" #include "qed_hsi.h" #include "qed_dbg_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" /* Memory groups enum */ enum mem_groups { MEM_GROUP_PXP_MEM, MEM_GROUP_DMAE_MEM, MEM_GROUP_CM_MEM, MEM_GROUP_QM_MEM, MEM_GROUP_DORQ_MEM, MEM_GROUP_BRB_RAM, MEM_GROUP_BRB_MEM, MEM_GROUP_PRS_MEM, MEM_GROUP_SDM_MEM, MEM_GROUP_PBUF, MEM_GROUP_IOR, MEM_GROUP_RAM, MEM_GROUP_BTB_RAM, MEM_GROUP_RDIF_CTX, MEM_GROUP_TDIF_CTX, MEM_GROUP_CFC_MEM, MEM_GROUP_CONN_CFC_MEM, MEM_GROUP_CAU_PI, MEM_GROUP_CAU_MEM, MEM_GROUP_CAU_MEM_EXT, MEM_GROUP_PXP_ILT, MEM_GROUP_MULD_MEM, MEM_GROUP_BTB_MEM, MEM_GROUP_IGU_MEM, MEM_GROUP_IGU_MSIX, MEM_GROUP_CAU_SB, MEM_GROUP_BMB_RAM, MEM_GROUP_BMB_MEM, MEM_GROUP_TM_MEM, MEM_GROUP_TASK_CFC_MEM, MEM_GROUPS_NUM }; /* Memory groups names */ static const char * const s_mem_group_names[] = { "PXP_MEM", "DMAE_MEM", "CM_MEM", "QM_MEM", "DORQ_MEM", "BRB_RAM", "BRB_MEM", "PRS_MEM", "SDM_MEM", "PBUF", "IOR", "RAM", "BTB_RAM", "RDIF_CTX", "TDIF_CTX", "CFC_MEM", "CONN_CFC_MEM", "CAU_PI", "CAU_MEM", "CAU_MEM_EXT", "PXP_ILT", "MULD_MEM", "BTB_MEM", "IGU_MEM", "IGU_MSIX", "CAU_SB", "BMB_RAM", "BMB_MEM", "TM_MEM", "TASK_CFC_MEM", }; /* Idle check conditions */ static u32 cond5(const u32 *r, const u32 *imm) { return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]); } static u32 cond7(const u32 *r, const u32 *imm) { return ((r[0] >> imm[0]) & imm[1]) != imm[2]; } static u32 cond6(const u32 *r, const u32 *imm) { return (r[0] & imm[0]) != imm[1]; } static u32 cond9(const u32 *r, const u32 *imm) { return ((r[0] & imm[0]) >> imm[1]) != (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5])); } static u32 cond10(const u32 *r, const u32 *imm) { return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]); } static u32 cond4(const u32 *r, const u32 *imm) { return (r[0] & ~imm[0]) != imm[1]; } static u32 cond0(const u32 *r, const u32 *imm) { return (r[0] & ~r[1]) != imm[0]; } static u32 cond14(const u32 *r, const u32 *imm) { return (r[0] | imm[0]) != imm[1]; } static u32 cond1(const u32 *r, const u32 *imm) { return r[0] != imm[0]; } static u32 cond11(const u32 *r, const u32 *imm) { return r[0] != r[1] && r[2] == imm[0]; } static u32 cond12(const u32 *r, const u32 *imm) { return r[0] != r[1] && r[2] > imm[0]; } static u32 cond3(const u32 *r, const u32 *imm) { return r[0] != r[1]; } static u32 cond13(const u32 *r, const u32 *imm) { return r[0] & imm[0]; } static u32 cond8(const u32 *r, const u32 *imm) { return r[0] < (r[1] - imm[0]); } static u32 cond2(const u32 *r, const u32 *imm) { return r[0] > imm[0]; } /* Array of Idle Check conditions */ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = { cond0, cond1, cond2, cond3, cond4, cond5, cond6, cond7, cond8, cond9, cond10, cond11, cond12, cond13, cond14, }; #define NUM_PHYS_BLOCKS 84 #define NUM_DBG_RESET_REGS 8 /******************************* Data Types **********************************/ enum hw_types { HW_TYPE_ASIC, PLATFORM_RESERVED, PLATFORM_RESERVED2, PLATFORM_RESERVED3, PLATFORM_RESERVED4, MAX_HW_TYPES }; /* CM context types */ enum cm_ctx_types { CM_CTX_CONN_AG, CM_CTX_CONN_ST, CM_CTX_TASK_AG, CM_CTX_TASK_ST, NUM_CM_CTX_TYPES }; /* Debug bus frame modes */ enum dbg_bus_frame_modes { DBG_BUS_FRAME_MODE_4ST = 0, /* 4 Storm dwords 
(no HW) */ DBG_BUS_FRAME_MODE_2ST_2HW = 1, /* 2 Storm dwords, 2 HW dwords */ DBG_BUS_FRAME_MODE_1ST_3HW = 2, /* 1 Storm dwords, 3 HW dwords */ DBG_BUS_FRAME_MODE_4HW = 3, /* 4 HW dwords (no Storms) */ DBG_BUS_FRAME_MODE_8HW = 4, /* 8 HW dwords (no Storms) */ DBG_BUS_NUM_FRAME_MODES }; /* Debug bus SEMI frame modes */ enum dbg_bus_semi_frame_modes { DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */ DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw,3 slow dw */ DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */ DBG_BUS_SEMI_NUM_FRAME_MODES }; /* Debug bus filter types */ enum dbg_bus_filter_types { DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */ DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */ DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */ DBG_BUS_FILTER_TYPE_ON /* Filter always on */ }; /* Debug bus pre-trigger recording types */ enum dbg_bus_pre_trigger_types { DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */ DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */ }; /* Debug bus post-trigger recording types */ enum dbg_bus_post_trigger_types { DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */ DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */ }; /* Debug bus other engine mode */ enum dbg_bus_other_engine_modes { DBG_BUS_OTHER_ENGINE_MODE_NONE, DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX }; /* DBG block Framing mode definitions */ struct framing_mode_defs { u8 id; u8 blocks_dword_mask; u8 storms_dword_mask; u8 semi_framing_mode_id; u8 full_buf_thr; }; /* Chip constant definitions */ struct chip_defs { const char *name; u8 dwords_per_cycle; u8 num_framing_modes; u32 num_ilt_pages; struct framing_mode_defs *framing_modes; }; /* HW type constant definitions */ struct hw_type_defs { const char *name; u32 delay_factor; u32 dmae_thresh; u32 log_thresh; }; /* RBC reset definitions */ struct rbc_reset_defs { u32 reset_reg_addr; u32 reset_val[MAX_CHIP_IDS]; }; /* Storm constant definitions. * Addresses are in bytes, sizes are in quad-regs. */ struct storm_defs { char letter; enum block_id sem_block_id; enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS]; bool has_vfc; u32 sem_fast_mem_addr; u32 sem_frame_mode_addr; u32 sem_slow_enable_addr; u32 sem_slow_mode_addr; u32 sem_slow_mode1_conf_addr; u32 sem_sync_dbg_empty_addr; u32 sem_gpre_vect_addr; u32 cm_ctx_wr_addr; u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES]; u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES]; }; /* Debug Bus Constraint operation constant definitions */ struct dbg_bus_constraint_op_defs { u8 hw_op_val; bool is_cyclic; }; /* Storm Mode definitions */ struct storm_mode_defs { const char *name; bool is_fast_dbg; u8 id_in_hw; u32 src_disable_reg_addr; u32 src_enable_val; bool exists[MAX_CHIP_IDS]; }; struct grc_param_defs { u32 default_val[MAX_CHIP_IDS]; u32 min; u32 max; bool is_preset; bool is_persistent; u32 exclude_all_preset_val; u32 crash_preset_val[MAX_CHIP_IDS]; }; /* Address is in 128b units. Width is in bits. 
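 * (The units above refer to the addr and entry_width fields of the RSS memory definitions below.)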
*/ struct rss_mem_defs { const char *mem_name; const char *type_name; u32 addr; u32 entry_width; u32 num_entries[MAX_CHIP_IDS]; }; struct vfc_ram_defs { const char *mem_name; const char *type_name; u32 base_row; u32 num_rows; }; struct big_ram_defs { const char *instance_name; enum mem_groups mem_group_id; enum mem_groups ram_mem_group_id; enum dbg_grc_params grc_param; u32 addr_reg_addr; u32 data_reg_addr; u32 is_256b_reg_addr; u32 is_256b_bit_offset[MAX_CHIP_IDS]; u32 ram_size[MAX_CHIP_IDS]; /* In dwords */ }; struct phy_defs { const char *phy_name; /* PHY base GRC address */ u32 base_addr; /* Relative address of indirect TBUS address register (bits 0..7) */ u32 tbus_addr_lo_addr; /* Relative address of indirect TBUS address register (bits 8..10) */ u32 tbus_addr_hi_addr; /* Relative address of indirect TBUS data register (bits 0..7) */ u32 tbus_data_lo_addr; /* Relative address of indirect TBUS data register (bits 8..11) */ u32 tbus_data_hi_addr; }; /* Split type definitions */ struct split_type_defs { const char *name; }; /******************************** Constants **********************************/ #define BYTES_IN_DWORD sizeof(u32) /* In the macros below, size and offset are specified in bits */ #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32) #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE #define FIELD_DWORD_OFFSET(type, field) \ ((int)(FIELD_BIT_OFFSET(type, field) / 32)) #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32) #define FIELD_BIT_MASK(type, field) \ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \ FIELD_DWORD_SHIFT(type, field)) #define SET_VAR_FIELD(var, type, field, val) \ do { \ var[FIELD_DWORD_OFFSET(type, field)] &= \ (~FIELD_BIT_MASK(type, field)); \ var[FIELD_DWORD_OFFSET(type, field)] |= \ (val) << FIELD_DWORD_SHIFT(type, field); \ } while (0) #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \ do { \ for (i = 0; i < (arr_size); i++) \ qed_wr(dev, ptt, addr, (arr)[i]); \ } while (0) #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD) #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD) /* extra lines include a signature line + optional latency events line */ #define NUM_EXTRA_DBG_LINES(block) \ (GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 
2 : 1) #define NUM_DBG_LINES(block) \ ((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block)) #define USE_DMAE true #define PROTECT_WIDE_BUS true #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2) #define RAM_LINES_TO_BYTES(lines) \ DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines)) #define REG_DUMP_LEN_SHIFT 24 #define MEM_DUMP_ENTRY_SIZE_DWORDS \ BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem)) #define IDLE_CHK_RULE_SIZE_DWORDS \ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule)) #define IDLE_CHK_RESULT_HDR_DWORDS \ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr)) #define IDLE_CHK_RESULT_REG_HDR_DWORDS \ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr)) #define PAGE_MEM_DESC_SIZE_DWORDS \ BYTES_TO_DWORDS(sizeof(struct phys_mem_desc)) #define IDLE_CHK_MAX_ENTRIES_SIZE 32 /* The sizes and offsets below are specified in bits */ #define VFC_CAM_CMD_STRUCT_SIZE 64 #define VFC_CAM_CMD_ROW_OFFSET 48 #define VFC_CAM_CMD_ROW_SIZE 9 #define VFC_CAM_ADDR_STRUCT_SIZE 16 #define VFC_CAM_ADDR_OP_OFFSET 0 #define VFC_CAM_ADDR_OP_SIZE 4 #define VFC_CAM_RESP_STRUCT_SIZE 256 #define VFC_RAM_ADDR_STRUCT_SIZE 16 #define VFC_RAM_ADDR_OP_OFFSET 0 #define VFC_RAM_ADDR_OP_SIZE 2 #define VFC_RAM_ADDR_ROW_OFFSET 2 #define VFC_RAM_ADDR_ROW_SIZE 10 #define VFC_RAM_RESP_STRUCT_SIZE 256 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE) #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE) #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE) #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE) #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE) #define NUM_VFC_RAM_TYPES 4 #define VFC_CAM_NUM_ROWS 512 #define VFC_OPCODE_CAM_RD 14 #define VFC_OPCODE_RAM_RD 0 #define NUM_RSS_MEM_TYPES 5 #define NUM_BIG_RAM_TYPES 3 #define BIG_RAM_NAME_LEN 3 #define NUM_PHY_TBUS_ADDRESSES 2048 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2) #define RESET_REG_UNRESET_OFFSET 4 #define STALL_DELAY_MS 500 #define STATIC_DEBUG_LINE_DWORDS 9 #define NUM_COMMON_GLOBAL_PARAMS 10 #define MAX_RECURSION_DEPTH 10 #define FW_IMG_KUKU 0 #define FW_IMG_MAIN 1 #define FW_IMG_L2B 2 #define REG_FIFO_ELEMENT_DWORDS 2 #define REG_FIFO_DEPTH_ELEMENTS 32 #define REG_FIFO_DEPTH_DWORDS \ (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS) #define IGU_FIFO_ELEMENT_DWORDS 4 #define IGU_FIFO_DEPTH_ELEMENTS 64 #define IGU_FIFO_DEPTH_DWORDS \ (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS) #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \ (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \ PROTECTION_OVERRIDE_ELEMENT_DWORDS) #define MCP_SPAD_TRACE_OFFSIZE_ADDR \ (MCP_REG_SCRATCH + \ offsetof(struct static_init, sections[SPAD_SECTION_TRACE])) #define MAX_SW_PLTAFORM_STR_SIZE 64 #define EMPTY_FW_VERSION_STR "???_???_???_???" #define EMPTY_FW_IMAGE_STR "???????????????" 
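/* Worked example (illustrative only) for the field macros above: for a field
 * with OFFSET 34 and SIZE 3, FIELD_DWORD_OFFSET() is 1, FIELD_DWORD_SHIFT()
 * is 2 and FIELD_BIT_MASK() is 0x1c, so SET_VAR_FIELD() clears bits 2..4 of
 * the second dword of 'var' and writes the value there.
 */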
/***************************** Constant Arrays *******************************/ /* DBG block framing mode definitions, in descending preference order */ static struct framing_mode_defs s_framing_mode_defs[4] = { {DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf, DBG_BUS_SEMI_FRAME_MODE_4FAST, 10}, {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW, 10}, {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc, DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10}, {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8, DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10} }; /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2, s_framing_mode_defs}, {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2, s_framing_mode_defs} }; /* Storm constant definitions array */ static struct storm_defs s_storm_defs[] = { /* Tstorm */ {'T', BLOCK_TSEM, {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true, TSEM_REG_FAST_MEMORY, TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE, TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG, TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT, TCM_REG_CTX_RBC_ACCS, {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX, TCM_REG_SM_TASK_CTX}, {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */ }, /* Mstorm */ {'M', BLOCK_MSEM, {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false, MSEM_REG_FAST_MEMORY, MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE, MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG, MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_DBG_GPRE_VECT, MCM_REG_CTX_RBC_ACCS, {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX, MCM_REG_SM_TASK_CTX }, {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2}*/ }, /* Ustorm */ {'U', BLOCK_USEM, {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false, USEM_REG_FAST_MEMORY, USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE, USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG, USEM_REG_SYNC_DBG_EMPTY, USEM_REG_DBG_GPRE_VECT, UCM_REG_CTX_RBC_ACCS, {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX, UCM_REG_SM_TASK_CTX}, {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */ }, /* Xstorm */ {'X', BLOCK_XSEM, {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false, XSEM_REG_FAST_MEMORY, XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE, XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG, XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_DBG_GPRE_VECT, XCM_REG_CTX_RBC_ACCS, {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0}, {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */ }, /* Ystorm */ {'Y', BLOCK_YSEM, {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false, YSEM_REG_FAST_MEMORY, YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE, YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG, YSEM_REG_SYNC_DBG_EMPTY, YSEM_REG_DBG_GPRE_VECT, YCM_REG_CTX_RBC_ACCS, {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX, YCM_REG_SM_TASK_CTX}, {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */ }, /* Pstorm */ {'P', BLOCK_PSEM, {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true, PSEM_REG_FAST_MEMORY, PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE, PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG, PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_DBG_GPRE_VECT, PCM_REG_CTX_RBC_ACCS, {0, PCM_REG_SM_CON_CTX, 0, 0}, {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */ }, }; static struct hw_type_defs s_hw_type_defs[] = { /* HW_TYPE_ASIC */ {"asic", 1, 256, 32768}, {"reserved", 0, 0, 0}, {"reserved2", 0, 0, 0}, {"reserved3", 0, 0, 0}, {"reserved4", 0, 0, 0} }; static struct grc_param_defs s_grc_param_defs[] = { /* DBG_GRC_PARAM_DUMP_TSTORM */ {{1, 1}, 0, 1, false, false, 1, 
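/* crash_preset_val for {bb, k2} */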
{1, 1}}, /* DBG_GRC_PARAM_DUMP_MSTORM */ {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_USTORM */ {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_XSTORM */ {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_YSTORM */ {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_PSTORM */ {{1, 1}, 0, 1, false, false, 1, {1, 1}}, /* DBG_GRC_PARAM_DUMP_REGS */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_RAM */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_PBUF */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_IOR */ {{0, 0}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_VFC */ {{0, 0}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_CM_CTX */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_ILT */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_RSS */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_CAU */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_QM */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_MCP */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_DORQ */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_CFC */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_IGU */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_BRB */ {{0, 0}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_BTB */ {{0, 0}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_BMB */ {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_RESERVED1 */ {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_DUMP_MULD */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_PRS */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_DMAE */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_TM */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_SDM */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_DIF */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_STATIC */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_UNSTALL */ {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_RESERVED2 */ {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */ {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}}, /* DBG_GRC_PARAM_EXCLUDE_ALL */ {{0, 0}, 0, 1, true, false, 0, {0, 0}}, /* DBG_GRC_PARAM_CRASH */ {{0, 0}, 0, 1, true, false, 0, {0, 0}}, /* DBG_GRC_PARAM_PARITY_SAFE */ {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_DUMP_CM */ {{1, 1}, 0, 1, false, false, 0, {1, 1}}, /* DBG_GRC_PARAM_DUMP_PHY */ {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_NO_MCP */ {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_NO_FW_VER */ {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_RESERVED3 */ {{0, 0}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */ {{0, 1}, 0, 1, false, false, 0, {0, 1}}, /* DBG_GRC_PARAM_DUMP_ILT_CDUC */ {{1, 1}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_DUMP_ILT_CDUT */ {{1, 1}, 0, 1, false, false, 0, {0, 0}}, /* DBG_GRC_PARAM_DUMP_CAU_EXT */ {{0, 0}, 0, 1, false, false, 0, {1, 1}} }; static struct rss_mem_defs s_rss_mem_defs[] = { {"rss_mem_cid", "rss_cid", 0, 32, {256, 320}}, {"rss_mem_key_msb", "rss_key", 1024, 256, {128, 208}}, {"rss_mem_key_lsb", "rss_key", 2048, 64, {128, 208}}, {"rss_mem_info", "rss_info", 3072, 16, {128, 208}}, {"rss_mem_ind", "rss_ind", 4096, 16, {16384, 26624}} }; static struct 
vfc_ram_defs s_vfc_ram_defs[] = { {"vfc_ram_tt1", "vfc_ram", 0, 512}, {"vfc_ram_mtt2", "vfc_ram", 512, 128}, {"vfc_ram_stt2", "vfc_ram", 640, 32}, {"vfc_ram_ro_vect", "vfc_ram", 672, 32} }; static struct big_ram_defs s_big_ram_defs[] = { {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, {0, 0}, {153600, 180224}}, {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, MISC_REG_BLOCK_256B_EN, {0, 1}, {92160, 117760}}, {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, MISCS_REG_BLOCK_256B_EN, {0, 0}, {36864, 36864}} }; static struct rbc_reset_defs s_rbc_reset_defs[] = { {MISCS_REG_RESET_PL_HV, {0x0, 0x400}}, {MISC_REG_RESET_PL_PDA_VMAIN_1, {0x4404040, 0x4404040}}, {MISC_REG_RESET_PL_PDA_VMAIN_2, {0x7, 0x7c00007}}, {MISC_REG_RESET_PL_PDA_VAUX, {0x2, 0x2}}, }; static struct phy_defs s_phy_defs[] = { {"nw_phy", NWS_REG_NWS_CMU_K2, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2, PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2}, {"sgmii_phy", MS_REG_MS_CMU_K2, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2}, {"pcie_phy0", PHY_PCIE_REG_PHY0_K2, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, {"pcie_phy1", PHY_PCIE_REG_PHY1_K2, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, }; static struct split_type_defs s_split_type_defs[] = { /* SPLIT_TYPE_NONE */ {"eng"}, /* SPLIT_TYPE_PORT */ {"port"}, /* SPLIT_TYPE_PF */ {"pf"}, /* SPLIT_TYPE_PORT_PF */ {"port"}, /* SPLIT_TYPE_VF */ {"vf"} }; /******************************** Variables **********************************/ /* The version of the calling app */ static u32 s_app_ver; /**************************** Private Functions ******************************/ static void qed_static_asserts(void) { } /* Reads and returns a single dword from the specified unaligned buffer */ static u32 qed_read_unaligned_dword(u8 *buf) { u32 dword; memcpy((u8 *)&dword, buf, sizeof(dword)); return dword; } /* Sets the value of the specified GRC param */ static void qed_grc_set_param(struct qed_hwfn *p_hwfn, enum dbg_grc_params grc_param, u32 val) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; dev_data->grc.param_val[grc_param] = val; } /* Returns the value of the specified GRC param */ static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn, enum dbg_grc_params grc_param) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; return dev_data->grc.param_val[grc_param]; } /* Initializes the GRC parameters */ static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; if (!dev_data->grc.params_initialized) { qed_dbg_grc_set_params_default(p_hwfn); dev_data->grc.params_initialized = 1; } } /* Sets pointer and size for the specified binary buffer type */ static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn, enum bin_dbg_buffer_type buf_type, const u32 *ptr, u32 size) { struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type]; buf->ptr = (void *)ptr; buf->size = size; } /* Initializes debug data for the specified 
device */ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u8 num_pfs = 0, max_pfs_per_port = 0; if (dev_data->initialized) return DBG_STATUS_OK; if (!s_app_ver) return DBG_STATUS_APP_VERSION_NOT_SET; /* Set chip */ if (QED_IS_K2(p_hwfn->cdev)) { dev_data->chip_id = CHIP_K2; dev_data->mode_enable[MODE_K2] = 1; dev_data->num_vfs = MAX_NUM_VFS_K2; num_pfs = MAX_NUM_PFS_K2; max_pfs_per_port = MAX_NUM_PFS_K2 / 2; } else if (QED_IS_BB_B0(p_hwfn->cdev)) { dev_data->chip_id = CHIP_BB; dev_data->mode_enable[MODE_BB] = 1; dev_data->num_vfs = MAX_NUM_VFS_BB; num_pfs = MAX_NUM_PFS_BB; max_pfs_per_port = MAX_NUM_PFS_BB; } else { return DBG_STATUS_UNKNOWN_CHIP; } /* Set HW type */ dev_data->hw_type = HW_TYPE_ASIC; dev_data->mode_enable[MODE_ASIC] = 1; /* Set port mode */ switch (p_hwfn->cdev->num_ports_in_engine) { case 1: dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1; break; case 2: dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1; break; case 4: dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1; break; } /* Set 100G mode */ if (QED_IS_CMT(p_hwfn->cdev)) dev_data->mode_enable[MODE_100G] = 1; /* Set number of ports */ if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] || dev_data->mode_enable[MODE_100G]) dev_data->num_ports = 1; else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2]) dev_data->num_ports = 2; else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4]) dev_data->num_ports = 4; /* Set number of PFs per port */ dev_data->num_pfs_per_port = min_t(u32, num_pfs / dev_data->num_ports, max_pfs_per_port); /* Initializes the GRC parameters */ qed_dbg_grc_init_params(p_hwfn); dev_data->use_dmae = true; dev_data->initialized = 1; return DBG_STATUS_OK; } static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn, enum block_id block_id) { const struct dbg_block *dbg_block; dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr; return dbg_block + block_id; } static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn *p_hwfn, enum block_id block_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; return (const struct dbg_block_chip *) p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr + block_id * MAX_CHIP_IDS + dev_data->chip_id; } static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn *p_hwfn, u8 reset_reg_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; return (const struct dbg_reset_reg *) p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr + reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id; } /* Reads the FW info structure for the specified Storm from the chip, * and writes it to the specified fw_info pointer. */ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 storm_id, struct fw_info *fw_info) { struct storm_defs *storm = &s_storm_defs[storm_id]; struct fw_info_location fw_info_location; u32 addr, i, size, *dest; memset(&fw_info_location, 0, sizeof(fw_info_location)); memset(fw_info, 0, sizeof(*fw_info)); /* Read first the address that points to fw_info location. * The address is located in the last line of the Storm RAM. 
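 * fw_info_location provides the GRC address and size of the fw_info struct, which is then read from Storm RAM dword by dword.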
*/ addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - sizeof(fw_info_location); dest = (u32 *)&fw_info_location; size = BYTES_TO_DWORDS(sizeof(fw_info_location)); for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); /* Read FW version info from Storm RAM */ size = le32_to_cpu(fw_info_location.size); if (!size || size > sizeof(*fw_info)) return; addr = le32_to_cpu(fw_info_location.grc_addr); dest = (u32 *)fw_info; size = BYTES_TO_DWORDS(size); for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); } /* Dumps the specified string to the specified buffer. * Returns the dumped size in bytes. */ static u32 qed_dump_str(char *dump_buf, bool dump, const char *str) { if (dump) strcpy(dump_buf, str); return (u32)strlen(str) + 1; } /* Dumps zeros to align the specified buffer to dwords. * Returns the dumped size in bytes. */ static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset) { u8 offset_in_dword, align_size; offset_in_dword = (u8)(byte_offset & 0x3); align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0; if (dump && align_size) memset(dump_buf, 0, align_size); return align_size; } /* Writes the specified string param to the specified buffer. * Returns the dumped size in dwords. */ static u32 qed_dump_str_param(u32 *dump_buf, bool dump, const char *param_name, const char *param_val) { char *char_buf = (char *)dump_buf; u32 offset = 0; /* Dump param name */ offset += qed_dump_str(char_buf + offset, dump, param_name); /* Indicate a string param value */ if (dump) *(char_buf + offset) = 1; offset++; /* Dump param value */ offset += qed_dump_str(char_buf + offset, dump, param_val); /* Align buffer to next dword */ offset += qed_dump_align(char_buf + offset, dump, offset); return BYTES_TO_DWORDS(offset); } /* Writes the specified numeric param to the specified buffer. * Returns the dumped size in dwords. */ static u32 qed_dump_num_param(u32 *dump_buf, bool dump, const char *param_name, u32 param_val) { char *char_buf = (char *)dump_buf; u32 offset = 0; /* Dump param name */ offset += qed_dump_str(char_buf + offset, dump, param_name); /* Indicate a numeric param value */ if (dump) *(char_buf + offset) = 0; offset++; /* Align buffer to next dword */ offset += qed_dump_align(char_buf + offset, dump, offset); /* Dump param value (and change offset from bytes to dwords) */ offset = BYTES_TO_DWORDS(offset); if (dump) *(dump_buf + offset) = param_val; offset++; return offset; } /* Reads the FW version and writes it as a param to the specified buffer. * Returns the dumped size in dwords. 
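 * If the DBG_GRC_PARAM_NO_FW_VER parameter is set, empty version/image strings are dumped instead of reading the chip.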
*/ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { char fw_ver_str[16] = EMPTY_FW_VERSION_STR; char fw_img_str[16] = EMPTY_FW_IMAGE_STR; struct fw_info fw_info = { {0}, {0} }; u32 offset = 0; if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { /* Read FW info from chip */ qed_read_fw_info(p_hwfn, p_ptt, &fw_info); /* Create FW version/image strings */ if (snprintf(fw_ver_str, sizeof(fw_ver_str), "%d_%d_%d_%d", fw_info.ver.num.major, fw_info.ver.num.minor, fw_info.ver.num.rev, fw_info.ver.num.eng) < 0) DP_NOTICE(p_hwfn, "Unexpected debug error: invalid FW version string\n"); switch (fw_info.ver.image_id) { case FW_IMG_KUKU: strcpy(fw_img_str, "kuku"); break; case FW_IMG_MAIN: strcpy(fw_img_str, "main"); break; case FW_IMG_L2B: strcpy(fw_img_str, "l2b"); break; default: strcpy(fw_img_str, "unknown"); break; } } /* Dump FW version, image and timestamp */ offset += qed_dump_str_param(dump_buf + offset, dump, "fw-version", fw_ver_str); offset += qed_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str); offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp", le32_to_cpu(fw_info.ver.timestamp)); return offset; } /* Reads the MFW version and writes it as a param to the specified buffer. * Returns the dumped size in dwords. */ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { char mfw_ver_str[16] = EMPTY_FW_VERSION_STR; if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { u32 global_section_offsize, global_section_addr, mfw_ver; u32 public_data_addr, global_section_offsize_addr; /* Find MCP public data GRC address. Needs to be ORed with * MCP_REG_SCRATCH due to a HW bug. */ public_data_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR) | MCP_REG_SCRATCH; /* Find MCP public global section offset */ global_section_offsize_addr = public_data_addr + offsetof(struct mcp_public_data, sections) + sizeof(offsize_t) * PUBLIC_GLOBAL; global_section_offsize = qed_rd(p_hwfn, p_ptt, global_section_offsize_addr); global_section_addr = MCP_REG_SCRATCH + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4; /* Read MFW version from MCP public global section */ mfw_ver = qed_rd(p_hwfn, p_ptt, global_section_addr + offsetof(struct public_global, mfw_ver)); /* Dump MFW version param */ if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0) DP_NOTICE(p_hwfn, "Unexpected debug error: invalid MFW version string\n"); } return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str); } /* Reads the chip revision from the chip and writes it as a param to the * specified buffer. Returns the dumped size in dwords. */ static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; char param_str[3] = "??"; if (dev_data->hw_type == HW_TYPE_ASIC) { u32 chip_rev, chip_metal; chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); param_str[0] = 'a' + (u8)chip_rev; param_str[1] = '0' + (u8)chip_metal; } return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str); } /* Writes a section header to the specified buffer. * Returns the dumped size in dwords. 
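 * A section header is encoded as a numeric param whose name is the section name and whose value is the number of params in the section.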
*/ static u32 qed_dump_section_hdr(u32 *dump_buf, bool dump, const char *name, u32 num_params) { return qed_dump_num_param(dump_buf, dump, name, num_params); } /* Writes the common global params to the specified buffer. * Returns the dumped size in dwords. */ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u8 num_specific_global_params) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 offset = 0; u8 num_params; /* Dump global params section header */ num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params + (dev_data->chip_id == CHIP_BB ? 1 : 0); offset += qed_dump_section_hdr(dump_buf + offset, dump, "global_params", num_params); /* Store params */ offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump); offset += qed_dump_mfw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump); offset += qed_dump_chip_revision_param(p_hwfn, p_ptt, dump_buf + offset, dump); offset += qed_dump_num_param(dump_buf + offset, dump, "tools-version", TOOLS_VERSION); offset += qed_dump_str_param(dump_buf + offset, dump, "chip", s_chip_defs[dev_data->chip_id].name); offset += qed_dump_str_param(dump_buf + offset, dump, "platform", s_hw_type_defs[dev_data->hw_type].name); offset += qed_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id); offset += qed_dump_num_param(dump_buf + offset, dump, "epoch", qed_get_epoch_time()); if (dev_data->chip_id == CHIP_BB) offset += qed_dump_num_param(dump_buf + offset, dump, "path", QED_PATH_ID(p_hwfn)); return offset; } /* Writes the "last" section (including CRC) to the specified buffer at the * given offset. Returns the dumped size in dwords. */ static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump) { u32 start_offset = offset; /* Dump CRC section header */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0); /* Calculate CRC32 and add it to the dword after the "last" section */ if (dump) *(dump_buf + offset) = ~crc32(0xffffffff, (u8 *)dump_buf, DWORDS_TO_BYTES(offset)); offset++; return offset - start_offset; } /* Update blocks reset state */ static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 reg_val[NUM_DBG_RESET_REGS] = { 0 }; u8 rst_reg_id; u32 blk_id; /* Read reset registers */ for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) { const struct dbg_reset_reg *rst_reg; bool rst_reg_removed; u32 rst_reg_addr; rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id); rst_reg_removed = GET_FIELD(rst_reg->data, DBG_RESET_REG_IS_REMOVED); rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data, DBG_RESET_REG_ADDR)); if (!rst_reg_removed) reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt, rst_reg_addr); } /* Check if blocks are in reset */ for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) { const struct dbg_block_chip *blk; bool has_rst_reg; bool is_removed; blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id); is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED); has_rst_reg = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_HAS_RESET_REG); if (!is_removed && has_rst_reg) dev_data->block_in_reset[blk_id] = !(reg_val[blk->reset_reg_id] & BIT(blk->reset_reg_bit_offset)); } } /* is_mode_match recursive function */ static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset, u8 rec_depth) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u8 *dbg_array; bool arg1, arg2; u8 tree_val; if (rec_depth > 
MAX_RECURSION_DEPTH) { DP_NOTICE(p_hwfn, "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n"); return false; } /* Get next element from modes tree buffer */ dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr; tree_val = dbg_array[(*modes_buf_offset)++]; switch (tree_val) { case INIT_MODE_OP_NOT: return !qed_is_mode_match_rec(p_hwfn, modes_buf_offset, rec_depth + 1); case INIT_MODE_OP_OR: case INIT_MODE_OP_AND: arg1 = qed_is_mode_match_rec(p_hwfn, modes_buf_offset, rec_depth + 1); arg2 = qed_is_mode_match_rec(p_hwfn, modes_buf_offset, rec_depth + 1); return (tree_val == INIT_MODE_OP_OR) ? (arg1 || arg2) : (arg1 && arg2); default: return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0; } } /* Returns true if the mode (specified using modes_buf_offset) is enabled */ static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset) { return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0); } /* Enable / disable the Debug block */ static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable) { qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0); } /* Resets the Debug block */ static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val; const struct dbg_reset_reg *reset_reg; const struct dbg_block_chip *block; block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG); reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id); reset_reg_addr = DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR)); old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr); new_reset_reg_val = old_reset_reg_val & ~BIT(block->reset_reg_bit_offset); qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val); qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val); } /* Enable / disable Debug Bus clients according to the specified mask * (1 = enable, 0 = disable). 
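 * The mask is written as-is to DBG_REG_CLIENT_ENABLE.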
*/ static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 client_mask) { qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask); } static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum block_id block_id, u8 line_id, u8 enable_mask, u8 right_shift, u8 force_valid_mask, u8 force_frame_mask) { const struct dbg_block_chip *block = qed_get_dbg_block_per_chip(p_hwfn, block_id); qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr), line_id); qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr), enable_mask); qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr), right_shift); qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr), force_valid_mask); qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr), force_frame_mask); } /* Disable debug bus in all blocks */ static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 block_id; /* Disable all blocks */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { const struct dbg_block_chip *block_per_chip = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id); if (GET_FIELD(block_per_chip->flags, DBG_BLOCK_CHIP_IS_REMOVED) || dev_data->block_in_reset[block_id]) continue; /* Disable debug bus */ if (GET_FIELD(block_per_chip->flags, DBG_BLOCK_CHIP_HAS_DBG_BUS)) { u32 dbg_en_addr = block_per_chip->dbg_dword_enable_reg_addr; u16 modes_buf_offset = GET_FIELD(block_per_chip->dbg_bus_mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); bool eval_mode = GET_FIELD(block_per_chip->dbg_bus_mode.data, DBG_MODE_HDR_EVAL_MODE) > 0; if (!eval_mode || qed_is_mode_match(p_hwfn, &modes_buf_offset)) qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(dbg_en_addr), 0); } } } /* Returns true if the specified entity (indicated by GRC param) should be * included in the dump, false otherwise. */ static bool qed_grc_is_included(struct qed_hwfn *p_hwfn, enum dbg_grc_params grc_param) { return qed_grc_get_param(p_hwfn, grc_param) > 0; } /* Returns the storm_id that matches the specified Storm letter, * or MAX_DBG_STORMS if invalid storm letter. */ static enum dbg_storms qed_get_id_from_letter(char storm_letter) { u8 storm_id; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) if (s_storm_defs[storm_id].letter == storm_letter) return (enum dbg_storms)storm_id; return MAX_DBG_STORMS; } /* Returns true of the specified Storm should be included in the dump, false * otherwise. */ static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn, enum dbg_storms storm) { return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0; } /* Returns true if the specified memory should be included in the dump, false * otherwise. 
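 * The decision is based on the block's associated Storm, the Big RAM memory groups, and the per-group GRC dump parameters.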
*/ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, enum block_id block_id, u8 mem_group_id) { const struct dbg_block *block; u8 i; block = get_dbg_block(p_hwfn, block_id); /* If the block is associated with a Storm, check Storm match */ if (block->associated_storm_letter) { enum dbg_storms associated_storm_id = qed_get_id_from_letter(block->associated_storm_letter); if (associated_storm_id == MAX_DBG_STORMS || !qed_grc_is_storm_included(p_hwfn, associated_storm_id)) return false; } for (i = 0; i < NUM_BIG_RAM_TYPES; i++) { struct big_ram_defs *big_ram = &s_big_ram_defs[i]; if (mem_group_id == big_ram->mem_group_id || mem_group_id == big_ram->ram_mem_group_id) return qed_grc_is_included(p_hwfn, big_ram->grc_param); } switch (mem_group_id) { case MEM_GROUP_PXP_ILT: case MEM_GROUP_PXP_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP); case MEM_GROUP_RAM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM); case MEM_GROUP_PBUF: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF); case MEM_GROUP_CAU_MEM: case MEM_GROUP_CAU_SB: case MEM_GROUP_CAU_PI: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU); case MEM_GROUP_CAU_MEM_EXT: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT); case MEM_GROUP_QM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM); case MEM_GROUP_CFC_MEM: case MEM_GROUP_CONN_CFC_MEM: case MEM_GROUP_TASK_CFC_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX); case MEM_GROUP_DORQ_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ); case MEM_GROUP_IGU_MEM: case MEM_GROUP_IGU_MSIX: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU); case MEM_GROUP_MULD_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD); case MEM_GROUP_PRS_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS); case MEM_GROUP_DMAE_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE); case MEM_GROUP_TM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM); case MEM_GROUP_SDM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM); case MEM_GROUP_TDIF_CTX: case MEM_GROUP_RDIF_CTX: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF); case MEM_GROUP_CM_MEM: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM); case MEM_GROUP_IOR: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR); default: return true; } } /* Stalls all Storms */ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool stall) { u32 reg_addr; u8 storm_id; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { if (!qed_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id)) continue; reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + SEM_FAST_REG_STALL_0; qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0); } msleep(STALL_DELAY_MS); } /* Takes all blocks out of reset. If rbc_only is true, only RBC clients are * taken out of reset. 
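 * Un-reset is performed by writing to the reset register address plus RESET_REG_UNRESET_OFFSET.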
*/ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool rbc_only) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u8 chip_id = dev_data->chip_id; u32 i; /* Take RBCs out of reset */ for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++) if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id]) qed_wr(p_hwfn, p_ptt, s_rbc_reset_defs[i].reset_reg_addr + RESET_REG_UNRESET_OFFSET, s_rbc_reset_defs[i].reset_val[chip_id]); if (!rbc_only) { u32 reg_val[NUM_DBG_RESET_REGS] = { 0 }; u8 reset_reg_id; u32 block_id; /* Fill reset regs values */ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { bool is_removed, has_reset_reg, unreset_before_dump; const struct dbg_block_chip *block; block = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id) block_id); is_removed = GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED); has_reset_reg = GET_FIELD(block->flags, DBG_BLOCK_CHIP_HAS_RESET_REG); unreset_before_dump = GET_FIELD(block->flags, DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP); if (!is_removed && has_reset_reg && unreset_before_dump) reg_val[block->reset_reg_id] |= BIT(block->reset_reg_bit_offset); } /* Write reset registers */ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS; reset_reg_id++) { const struct dbg_reset_reg *reset_reg; u32 reset_reg_addr; reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id); if (GET_FIELD (reset_reg->data, DBG_RESET_REG_IS_REMOVED)) continue; if (reg_val[reset_reg_id]) { reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR); qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(reset_reg_addr) + RESET_REG_UNRESET_OFFSET, reg_val[reset_reg_id]); } } } } /* Returns the attention block data of the specified block */ static const struct dbg_attn_block_type_data * qed_get_block_attn_data(struct qed_hwfn *p_hwfn, enum block_id block_id, enum dbg_attn_type attn_type) { const struct dbg_attn_block *base_attn_block_arr = (const struct dbg_attn_block *) p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr; return &base_attn_block_arr[block_id].per_type_data[attn_type]; } /* Returns the attention registers of the specified block */ static const struct dbg_attn_reg * qed_get_block_attn_regs(struct qed_hwfn *p_hwfn, enum block_id block_id, enum dbg_attn_type attn_type, u8 *num_attn_regs) { const struct dbg_attn_block_type_data *block_type_data = qed_get_block_attn_data(p_hwfn, block_id, attn_type); *num_attn_regs = block_type_data->num_regs; return (const struct dbg_attn_reg *) p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr + block_type_data->regs_offset; } /* For each block, clear the status of all parities */ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; const struct dbg_attn_reg *attn_reg_arr; u32 block_id, sts_clr_address; u8 reg_idx, num_attn_regs; for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { if (dev_data->block_in_reset[block_id]) continue; attn_reg_arr = qed_get_block_attn_regs(p_hwfn, (enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs); for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx]; u16 modes_buf_offset; bool eval_mode; /* Check mode */ eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0; modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); sts_clr_address = reg_data->sts_clr_address; /* If Mode match: clear parity status */ if (!eval_mode || qed_is_mode_match(p_hwfn, &modes_buf_offset)) qed_rd(p_hwfn, p_ptt, 
DWORDS_TO_BYTES(sts_clr_address)); } } } /* Finds the meta data image in NVRAM */ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 image_type, u32 *nvram_offset_bytes, u32 *nvram_size_bytes, bool b_can_sleep) { u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; struct mcp_file_att file_att; int nvm_result; /* Call NVRAM get file command */ nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, image_type, &ret_mcp_resp, &ret_mcp_param, &ret_txn_size, (u32 *)&file_att, b_can_sleep); /* Check response */ if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; /* Update return values */ *nvram_offset_bytes = file_att.nvm_start_addr; *nvram_size_bytes = file_att.len; DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", image_type, *nvram_offset_bytes, *nvram_size_bytes); /* Check alignment */ if (*nvram_size_bytes & 0x3) return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; return DBG_STATUS_OK; } /* Reads data from NVRAM */ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 nvram_offset_bytes, u32 nvram_size_bytes, u32 *ret_buf, bool b_can_sleep) { u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; s32 bytes_left = nvram_size_bytes; u32 read_offset = 0, param = 0; DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "nvram_read: reading image of size %d bytes from NVRAM\n", nvram_size_bytes); do { bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left; /* Call NVRAM read command */ SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_OFFSET, nvram_offset_bytes + read_offset); SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy); if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, param, &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset), b_can_sleep)) return DBG_STATUS_NVRAM_READ_FAILED; /* Check response */ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) return DBG_STATUS_NVRAM_READ_FAILED; /* Update read offset */ read_offset += ret_read_size; bytes_left -= ret_read_size; } while (bytes_left > 0); return DBG_STATUS_OK; } /* Dumps GRC registers section header. Returns the dumped size in dwords. * the following parameters are dumped: * - count: no. of dumped entries * - split_type: split type * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE) * - reg_type_name: register type name (dumped only if reg_type_name != NULL) */ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, bool dump, u32 num_reg_entries, enum init_split_types split_type, u8 split_id, const char *reg_type_name) { u8 num_params = 2 + (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0); u32 offset = 0; offset += qed_dump_section_hdr(dump_buf + offset, dump, "grc_regs", num_params); offset += qed_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries); offset += qed_dump_str_param(dump_buf + offset, dump, "split", s_split_type_defs[split_type].name); if (split_type != SPLIT_TYPE_NONE) offset += qed_dump_num_param(dump_buf + offset, dump, "id", split_id); if (reg_type_name) offset += qed_dump_str_param(dump_buf + offset, dump, "type", reg_type_name); return offset; } /* Reads the specified registers into the specified buffer. * The addr and len arguments are specified in dwords. 
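 * Each dword is read with an individual GRC access (qed_rd).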
*/ void qed_read_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len) { u32 i; for (i = 0; i < len; i++) buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i)); } /* Dumps the GRC registers in the specified address range. * Returns the dumped size in dwords. * The addr and len arguments are specified in dwords. */ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 addr, u32 len, bool wide_bus, enum init_split_types split_type, u8 split_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u8 port_id = 0, pf_id = 0, vf_id = 0; bool read_using_dmae = false; u32 thresh; u16 fid; if (!dump) return len; switch (split_type) { case SPLIT_TYPE_PORT: port_id = split_id; break; case SPLIT_TYPE_PF: pf_id = split_id; break; case SPLIT_TYPE_PORT_PF: port_id = split_id / dev_data->num_pfs_per_port; pf_id = port_id + dev_data->num_ports * (split_id % dev_data->num_pfs_per_port); break; case SPLIT_TYPE_VF: vf_id = split_id; break; default: break; } /* Try reading using DMAE */ if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF && (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh || (PROTECT_WIDE_BUS && wide_bus))) { struct qed_dmae_params dmae_params; /* Set DMAE params */ memset(&dmae_params, 0, sizeof(dmae_params)); SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1); switch (split_type) { case SPLIT_TYPE_PORT: SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID, 1); dmae_params.port_id = port_id; break; case SPLIT_TYPE_PF: SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_SRC_PF_VALID, 1); dmae_params.src_pfid = pf_id; break; case SPLIT_TYPE_PORT_PF: SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID, 1); SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_SRC_PF_VALID, 1); dmae_params.port_id = port_id; dmae_params.src_pfid = pf_id; break; default: break; } /* Execute DMAE command */ read_using_dmae = !qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), (u64)(uintptr_t)(dump_buf), len, &dmae_params); if (!read_using_dmae) { dev_data->use_dmae = 0; DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Failed reading from chip using DMAE, using GRC instead\n"); } } if (read_using_dmae) goto print_log; /* If not read using DMAE, read using GRC */ /* Set pretend */ if (split_type != dev_data->pretend.split_type || split_id != dev_data->pretend.split_id) { switch (split_type) { case SPLIT_TYPE_PORT: qed_port_pretend(p_hwfn, p_ptt, port_id); break; case SPLIT_TYPE_PF: fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, pf_id); qed_fid_pretend(p_hwfn, p_ptt, fid); break; case SPLIT_TYPE_PORT_PF: fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, pf_id); qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid); break; case SPLIT_TYPE_VF: fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1) | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID, vf_id); qed_fid_pretend(p_hwfn, p_ptt, fid); break; default: break; } dev_data->pretend.split_type = (u8)split_type; dev_data->pretend.split_id = split_id; } /* Read registers using GRC */ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len); print_log: /* Print log */ dev_data->num_regs_read += len; thresh = s_hw_type_defs[dev_data->hw_type].log_thresh; if ((dev_data->num_regs_read / thresh) > ((dev_data->num_regs_read - len) / thresh)) DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumped %d registers...\n", dev_data->num_regs_read); return len; } /* Dumps GRC registers sequence header. Returns the dumped size in dwords. * The addr and len arguments are specified in dwords. 
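 * The header is a single dword: the address in the low bits and the length shifted left by REG_DUMP_LEN_SHIFT.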
*/ static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr, u32 len) { if (dump) *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT); return 1; } /* Dumps GRC registers sequence. Returns the dumped size in dwords. * The addr and len arguments are specified in dwords. */ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 addr, u32 len, bool wide_bus, enum init_split_types split_type, u8 split_id) { u32 offset = 0; offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus, split_type, split_id); return offset; } /* Dumps GRC registers sequence with skip cycle. * Returns the dumped size in dwords. * - addr: start GRC address in dwords * - total_len: total no. of dwords to dump * - read_len: no. consecutive dwords to read * - skip_len: no. of dwords to skip (and fill with zeros) */ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 addr, u32 total_len, u32 read_len, u32 skip_len) { u32 offset = 0, reg_offset = 0; offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len); if (!dump) return offset + total_len; while (reg_offset < total_len) { u32 curr_len = min_t(u32, read_len, total_len - reg_offset); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, curr_len, false, SPLIT_TYPE_NONE, 0); reg_offset += curr_len; addr += curr_len; if (reg_offset < total_len) { curr_len = min_t(u32, skip_len, total_len - skip_len); memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len)); offset += curr_len; reg_offset += curr_len; addr += curr_len; } } return offset; } /* Dumps GRC registers entries. Returns the dumped size in dwords. */ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct virt_mem_desc input_regs_arr, u32 *dump_buf, bool dump, enum init_split_types split_type, u8 split_id, bool block_enable[MAX_BLOCK_ID], u32 *num_dumped_reg_entries) { u32 i, offset = 0, input_offset = 0; bool mode_match = true; *num_dumped_reg_entries = 0; while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) { const struct dbg_dump_cond_hdr *cond_hdr = (const struct dbg_dump_cond_hdr *) input_regs_arr.ptr + input_offset++; u16 modes_buf_offset; bool eval_mode; /* Check mode/block */ eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } if (!mode_match || !block_enable[cond_hdr->block_id]) { input_offset += cond_hdr->data_size; continue; } for (i = 0; i < cond_hdr->data_size; i++, input_offset++) { const struct dbg_dump_reg *reg = (const struct dbg_dump_reg *) input_regs_arr.ptr + input_offset; u32 addr, len; bool wide_bus; addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS); len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH); wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus, split_type, split_id); (*num_dumped_reg_entries)++; } } return offset; } /* Dumps GRC registers entries. Returns the dumped size in dwords. 
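 * Handles a single split instance; for PORT_PF splits the header is written as a PORT split.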
*/ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct virt_mem_desc input_regs_arr, u32 *dump_buf, bool dump, bool block_enable[MAX_BLOCK_ID], enum init_split_types split_type, u8 split_id, const char *reg_type_name) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; enum init_split_types hdr_split_type = split_type; u32 num_dumped_reg_entries, offset; u8 hdr_split_id = split_id; /* In PORT_PF split type, print a port split header */ if (split_type == SPLIT_TYPE_PORT_PF) { hdr_split_type = SPLIT_TYPE_PORT; hdr_split_id = split_id / dev_data->num_pfs_per_port; } /* Calculate register dump header size (and skip it for now) */ offset = qed_grc_dump_regs_hdr(dump_buf, false, 0, hdr_split_type, hdr_split_id, reg_type_name); /* Dump registers */ offset += qed_grc_dump_regs_entries(p_hwfn, p_ptt, input_regs_arr, dump_buf + offset, dump, split_type, split_id, block_enable, &num_dumped_reg_entries); /* Write register dump header */ if (dump && num_dumped_reg_entries > 0) qed_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, hdr_split_type, hdr_split_id, reg_type_name); return num_dumped_reg_entries > 0 ? offset : 0; } /* Dumps registers according to the input registers array. Returns the dumped * size in dwords. */ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, bool block_enable[MAX_BLOCK_ID], const char *reg_type_name) { struct virt_mem_desc *dbg_buf = &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG]; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 offset = 0, input_offset = 0; while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) { const struct dbg_dump_split_hdr *split_hdr; struct virt_mem_desc curr_input_regs_arr; enum init_split_types split_type; u16 split_count = 0; u32 split_data_size; u8 split_id; split_hdr = (const struct dbg_dump_split_hdr *) dbg_buf->ptr + input_offset++; split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE); curr_input_regs_arr.ptr = (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr + input_offset; curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size); switch (split_type) { case SPLIT_TYPE_NONE: split_count = 1; break; case SPLIT_TYPE_PORT: split_count = dev_data->num_ports; break; case SPLIT_TYPE_PF: case SPLIT_TYPE_PORT_PF: split_count = dev_data->num_ports * dev_data->num_pfs_per_port; break; case SPLIT_TYPE_VF: split_count = dev_data->num_vfs; break; default: return 0; } for (split_id = 0; split_id < split_count; split_id++) offset += qed_grc_dump_split_data(p_hwfn, p_ptt, curr_input_regs_arr, dump_buf + offset, dump, block_enable, split_type, split_id, reg_type_name); input_offset += split_data_size; } /* Cancel pretends (pretend to original PF) */ if (dump) { qed_fid_pretend(p_hwfn, p_ptt, FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, p_hwfn->rel_pf_id)); dev_data->pretend.split_type = SPLIT_TYPE_NONE; dev_data->pretend.split_id = 0; } return offset; } /* Dump reset registers. Returns the dumped size in dwords. 
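 * Each reset register is dumped as a single-dword register entry; registers
 * marked as removed (DBG_RESET_REG_IS_REMOVED) are skipped. The "RESET_REGS"
 * header is written only after the number of dumped entries is known.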
*/ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { u32 offset = 0, num_regs = 0; u8 reset_reg_id; /* Calculate header size */ offset += qed_grc_dump_regs_hdr(dump_buf, false, 0, SPLIT_TYPE_NONE, 0, "RESET_REGS"); /* Write reset registers */ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS; reset_reg_id++) { const struct dbg_reset_reg *reset_reg; u32 reset_reg_addr; reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id); if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED)) continue; reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, reset_reg_addr, 1, false, SPLIT_TYPE_NONE, 0); num_regs++; } /* Write header */ if (dump) qed_grc_dump_regs_hdr(dump_buf, true, num_regs, SPLIT_TYPE_NONE, 0, "RESET_REGS"); return offset; } /* Dump registers that are modified during GRC Dump and therefore must be * dumped first. Returns the dumped size in dwords. */ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 block_id, offset = 0, stall_regs_offset; const struct dbg_attn_reg *attn_reg_arr; u8 storm_id, reg_idx, num_attn_regs; u32 num_reg_entries = 0; /* Write empty header for attention registers */ offset += qed_grc_dump_regs_hdr(dump_buf, false, 0, SPLIT_TYPE_NONE, 0, "ATTN_REGS"); /* Write parity registers */ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { if (dev_data->block_in_reset[block_id] && dump) continue; attn_reg_arr = qed_get_block_attn_regs(p_hwfn, (enum block_id)block_id, ATTN_TYPE_PARITY, &num_attn_regs); for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx]; u16 modes_buf_offset; bool eval_mode; u32 addr; /* Check mode */ eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0; modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset)) continue; /* Mode match: read & dump registers */ addr = reg_data->mask_address; offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, addr, 1, false, SPLIT_TYPE_NONE, 0); addr = GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, addr, 1, false, SPLIT_TYPE_NONE, 0); num_reg_entries += 2; } } /* Overwrite header for attention registers */ if (dump) qed_grc_dump_regs_hdr(dump_buf, true, num_reg_entries, SPLIT_TYPE_NONE, 0, "ATTN_REGS"); /* Write empty header for stall registers */ stall_regs_offset = offset; offset += qed_grc_dump_regs_hdr(dump_buf, false, 0, SPLIT_TYPE_NONE, 0, "REGS"); /* Write Storm stall status registers */ for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS; storm_id++) { struct storm_defs *storm = &s_storm_defs[storm_id]; u32 addr; if (dev_data->block_in_reset[storm->sem_block_id] && dump) continue; addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STALLED); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, addr, 1, false, SPLIT_TYPE_NONE, 0); num_reg_entries++; } /* Overwrite header for stall registers */ if (dump) qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset, true, num_reg_entries, SPLIT_TYPE_NONE, 0, "REGS"); return offset; } /* Dumps registers that can't be represented in the debug arrays */ static u32 
qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { u32 offset = 0, addr; offset += qed_grc_dump_regs_hdr(dump_buf, dump, 2, SPLIT_TYPE_NONE, 0, "REGS"); /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be * skipped). */ addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO); offset += qed_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, addr, RDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1); addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO); offset += qed_grc_dump_reg_entry_skip(p_hwfn, p_ptt, dump_buf + offset, dump, addr, TDIF_REG_DEBUG_ERROR_INFO_SIZE, 7, 1); return offset; } /* Dumps a GRC memory header (section and params). Returns the dumped size in * dwords. The following parameters are dumped: * - name: dumped only if it's not NULL. * - addr: in dwords, dumped only if name is NULL. * - len: in dwords, always dumped. * - width: dumped if it's not zero. * - packed: dumped only if it's not false. * - mem_group: always dumped. * - is_storm: true only if the memory is related to a Storm. * - storm_letter: valid only if is_storm is true. * */ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, const char *name, u32 addr, u32 len, u32 bit_width, bool packed, const char *mem_group, char storm_letter) { u8 num_params = 3; u32 offset = 0; char buf[64]; if (!len) DP_NOTICE(p_hwfn, "Unexpected GRC Dump error: dumped memory size must be non-zero\n"); if (bit_width) num_params++; if (packed) num_params++; /* Dump section header */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params); if (name) { /* Dump name */ if (storm_letter) { strcpy(buf, "?STORM_"); buf[0] = storm_letter; strcpy(buf + strlen(buf), name); } else { strcpy(buf, name); } offset += qed_dump_str_param(dump_buf + offset, dump, "name", buf); } else { /* Dump address */ u32 addr_in_bytes = DWORDS_TO_BYTES(addr); offset += qed_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes); } /* Dump len */ offset += qed_dump_num_param(dump_buf + offset, dump, "len", len); /* Dump bit width */ if (bit_width) offset += qed_dump_num_param(dump_buf + offset, dump, "width", bit_width); /* Dump packed */ if (packed) offset += qed_dump_num_param(dump_buf + offset, dump, "packed", 1); /* Dump reg type */ if (storm_letter) { strcpy(buf, "?STORM_"); buf[0] = storm_letter; strcpy(buf + strlen(buf), mem_group); } else { strcpy(buf, mem_group); } offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf); return offset; } /* Dumps a single GRC memory. If name is NULL, the memory is stored by address. * Returns the dumped size in dwords. * The addr and len arguments are specified in dwords. */ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, const char *name, u32 addr, u32 len, bool wide_bus, u32 bit_width, bool packed, const char *mem_group, char storm_letter) { u32 offset = 0; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, addr, len, bit_width, packed, mem_group, storm_letter); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, wide_bus, SPLIT_TYPE_NONE, 0); return offset; } /* Dumps GRC memories entries. Returns the dumped size in dwords. 
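 * Entries are filtered twice: by the condition header mode (if any) and by
 * qed_grc_is_mem_included() for the entry's block and memory group. Memories
 * are dumped by address (NULL name); if the owning block is associated with a
 * Storm, the Storm letter is recorded in the memory header.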
*/ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct virt_mem_desc input_mems_arr, u32 *dump_buf, bool dump) { u32 i, offset = 0, input_offset = 0; bool mode_match = true; while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) { const struct dbg_dump_cond_hdr *cond_hdr; u16 modes_buf_offset; u32 num_entries; bool eval_mode; cond_hdr = (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr + input_offset++; num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS; /* Check required mode */ eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } if (!mode_match) { input_offset += cond_hdr->data_size; continue; } for (i = 0; i < num_entries; i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) { const struct dbg_dump_mem *mem = (const struct dbg_dump_mem *)((u32 *) input_mems_arr.ptr + input_offset); const struct dbg_block *block; char storm_letter = 0; u32 mem_addr, mem_len; bool mem_wide_bus; u8 mem_group_id; mem_group_id = GET_FIELD(mem->dword0, DBG_DUMP_MEM_MEM_GROUP_ID); if (mem_group_id >= MEM_GROUPS_NUM) { DP_NOTICE(p_hwfn, "Invalid mem_group_id\n"); return 0; } if (!qed_grc_is_mem_included(p_hwfn, (enum block_id) cond_hdr->block_id, mem_group_id)) continue; mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS); mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH); mem_wide_bus = GET_FIELD(mem->dword1, DBG_DUMP_MEM_WIDE_BUS); block = get_dbg_block(p_hwfn, cond_hdr->block_id); /* If memory is associated with Storm, * update storm details */ if (block->associated_storm_letter) storm_letter = block->associated_storm_letter; /* Dump memory */ offset += qed_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, NULL, mem_addr, mem_len, mem_wide_bus, 0, false, s_mem_group_names[mem_group_id], storm_letter); } } return offset; } /* Dumps GRC memories according to the input array dump_mem. * Returns the dumped size in dwords. */ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct virt_mem_desc *dbg_buf = &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM]; u32 offset = 0, input_offset = 0; while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) { const struct dbg_dump_split_hdr *split_hdr; struct virt_mem_desc curr_input_mems_arr; enum init_split_types split_type; u32 split_data_size; split_hdr = (const struct dbg_dump_split_hdr *)dbg_buf->ptr + input_offset++; split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_DATA_SIZE); curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset; curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size); if (split_type == SPLIT_TYPE_NONE) offset += qed_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump); else DP_NOTICE(p_hwfn, "Dumping split memories is currently not supported\n"); input_offset += split_data_size; } return offset; } /* Dumps GRC context data for the specified Storm. * Returns the dumped size in dwords. * The lid_size argument is specified in quad-regs. 
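 *
 * Read sequence per context dword (as implemented below): the dword index and
 * LID are written to the Storm's cm_ctx_wr_addr register as (i << 9) | lid,
 * and one dword is then read back from the matching cm_ctx_rd_addr address.
 * The resulting buffer layout, assuming lid_size dwords per LID, is
 * [lid0 dword0 .. lid0 dword(lid_size - 1), lid1 dword0, ...].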
*/ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, const char *name, u32 num_lids, enum cm_ctx_types ctx_type, u8 storm_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; struct storm_defs *storm = &s_storm_defs[storm_id]; u32 i, lid, lid_size, total_size; u32 rd_reg_addr, offset = 0; /* Convert quad-regs to dwords */ lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4; if (!lid_size) return 0; total_size = num_lids * lid_size; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, name, 0, total_size, lid_size * 32, false, name, storm->letter); if (!dump) return offset + total_size; rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]); /* Dump context data */ for (lid = 0; lid < num_lids; lid++) { for (i = 0; i < lid_size; i++) { qed_wr(p_hwfn, p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, rd_reg_addr, 1, false, SPLIT_TYPE_NONE, 0); } } return offset; } /* Dumps GRC contexts. Returns the dumped size in dwords. */ static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { u32 offset = 0; u8 storm_id; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { if (!qed_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id)) continue; /* Dump Conn AG context size */ offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_AG_CTX", NUM_OF_LCIDS, CM_CTX_CONN_AG, storm_id); /* Dump Conn ST context size */ offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "CONN_ST_CTX", NUM_OF_LCIDS, CM_CTX_CONN_ST, storm_id); /* Dump Task AG context size */ offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_AG_CTX", NUM_OF_LTIDS, CM_CTX_TASK_AG, storm_id); /* Dump Task ST context size */ offset += qed_grc_dump_ctx_data(p_hwfn, p_ptt, dump_buf + offset, dump, "TASK_ST_CTX", NUM_OF_LTIDS, CM_CTX_TASK_ST, storm_id); } return offset; } #define VFC_STATUS_RESP_READY_BIT 0 #define VFC_STATUS_BUSY_BIT 1 #define VFC_STATUS_SENDING_CMD_BIT 2 #define VFC_POLLING_DELAY_MS 1 #define VFC_POLLING_COUNT 20 /* Reads data from VFC. Returns the number of dwords read (0 on error). * Sizes are specified in dwords. 
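 *
 * The command is written to SEM_FAST_REG_VFC_DATA_WR and the address to
 * SEM_FAST_REG_VFC_ADDR; each response dword is then read from
 * SEM_FAST_REG_VFC_DATA_RD once the ready bit is set in
 * SEM_FAST_REG_VFC_STATUS (giving up after VFC_POLLING_COUNT polls).
 *
 * A usage sketch, mirroring qed_grc_dump_vfc_cam() further below ("row" and
 * "resp" are illustrative locals):
 *
 *	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
 *	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
 *
 *	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
 *	if (!qed_grc_dump_read_from_vfc(p_hwfn, p_ptt, storm,
 *					cam_cmd, VFC_CAM_CMD_DWORDS,
 *					cam_addr, VFC_CAM_ADDR_DWORDS,
 *					VFC_CAM_RESP_DWORDS, resp))
 *		return 0;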
*/ static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct storm_defs *storm, u32 *cmd_data, u32 cmd_size, u32 *addr_data, u32 addr_size, u32 resp_size, u32 *dump_buf) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 vfc_status, polling_ms, polling_count = 0, i; u32 reg_addr, sem_base; bool is_ready = false; sem_base = storm->sem_fast_mem_addr; polling_ms = VFC_POLLING_DELAY_MS * s_hw_type_defs[dev_data->hw_type].delay_factor; /* Write VFC command */ ARR_REG_WR(p_hwfn, p_ptt, sem_base + SEM_FAST_REG_VFC_DATA_WR, cmd_data, cmd_size); /* Write VFC address */ ARR_REG_WR(p_hwfn, p_ptt, sem_base + SEM_FAST_REG_VFC_ADDR, addr_data, addr_size); /* Read response */ for (i = 0; i < resp_size; i++) { /* Poll until ready */ do { reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS; qed_grc_dump_addr_range(p_hwfn, p_ptt, &vfc_status, true, BYTES_TO_DWORDS(reg_addr), 1, false, SPLIT_TYPE_NONE, 0); is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT); if (!is_ready) { if (polling_count++ == VFC_POLLING_COUNT) return 0; msleep(polling_ms); } } while (!is_ready); reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD; qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + i, true, BYTES_TO_DWORDS(reg_addr), 1, false, SPLIT_TYPE_NONE, 0); } return resp_size; } /* Dump VFC CAM. Returns the dumped size in dwords. */ static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u8 storm_id) { u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS; struct storm_defs *storm = &s_storm_defs[storm_id]; u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 }; u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 }; u32 row, offset = 0; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, "vfc_cam", 0, total_size, 256, false, "vfc_cam", storm->letter); if (!dump) return offset + total_size; /* Prepare CAM address */ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD); /* Read VFC CAM data */ for (row = 0; row < VFC_CAM_NUM_ROWS; row++) { SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row); offset += qed_grc_dump_read_from_vfc(p_hwfn, p_ptt, storm, cam_cmd, VFC_CAM_CMD_DWORDS, cam_addr, VFC_CAM_ADDR_DWORDS, VFC_CAM_RESP_DWORDS, dump_buf + offset); } return offset; } /* Dump VFC RAM. Returns the dumped size in dwords. */ static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u8 storm_id, struct vfc_ram_defs *ram_defs) { u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS; struct storm_defs *storm = &s_storm_defs[storm_id]; u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 }; u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 }; u32 row, offset = 0; offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, ram_defs->mem_name, 0, total_size, 256, false, ram_defs->type_name, storm->letter); if (!dump) return offset + total_size; /* Prepare RAM address */ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD); /* Read VFC RAM data */ for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++) { SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row); offset += qed_grc_dump_read_from_vfc(p_hwfn, p_ptt, storm, ram_cmd, VFC_RAM_CMD_DWORDS, ram_addr, VFC_RAM_ADDR_DWORDS, VFC_RAM_RESP_DWORDS, dump_buf + offset); } return offset; } /* Dumps GRC VFC data. Returns the dumped size in dwords. 
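 * For every included Storm that has a VFC, the CAM is dumped first, followed
 * by each of the NUM_VFC_RAM_TYPES RAM instances described in s_vfc_ram_defs.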
*/ static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { u8 storm_id, i; u32 offset = 0; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { if (!qed_grc_is_storm_included(p_hwfn, (enum dbg_storms)storm_id) || !s_storm_defs[storm_id].has_vfc) continue; /* Read CAM */ offset += qed_grc_dump_vfc_cam(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id); /* Read RAM */ for (i = 0; i < NUM_VFC_RAM_TYPES; i++) offset += qed_grc_dump_vfc_ram(p_hwfn, p_ptt, dump_buf + offset, dump, storm_id, &s_vfc_ram_defs[i]); } return offset; } /* Dumps GRC RSS data. Returns the dumped size in dwords. */ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 offset = 0; u8 rss_mem_id; for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) { u32 rss_addr, num_entries, total_dwords; struct rss_mem_defs *rss_defs; u32 addr, num_dwords_to_read; bool packed; rss_defs = &s_rss_mem_defs[rss_mem_id]; rss_addr = rss_defs->addr; num_entries = rss_defs->num_entries[dev_data->chip_id]; total_dwords = (num_entries * rss_defs->entry_width) / 32; packed = (rss_defs->entry_width == 16); offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, rss_defs->mem_name, 0, total_dwords, rss_defs->entry_width, packed, rss_defs->type_name, 0); /* Dump RSS data */ if (!dump) { offset += total_dwords; continue; } addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA); while (total_dwords) { num_dwords_to_read = min_t(u32, RSS_REG_RSS_RAM_DATA_SIZE, total_dwords); qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, num_dwords_to_read, false, SPLIT_TYPE_NONE, 0); total_dwords -= num_dwords_to_read; rss_addr++; } } return offset; } /* Dumps GRC Big RAM. Returns the dumped size in dwords. */ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u8 big_ram_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 block_size, ram_size, offset = 0, reg_val, i; char mem_name[12] = "???_BIG_RAM"; char type_name[8] = "???_RAM"; struct big_ram_defs *big_ram; big_ram = &s_big_ram_defs[big_ram_id]; ram_size = big_ram->ram_size[dev_data->chip_id]; reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr); block_size = reg_val & BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 : 128; strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN); strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN); /* Dump memory header */ offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, ram_size, block_size * 8, false, type_name, 0); /* Read and dump Big RAM data */ if (!dump) return offset + ram_size; /* Dump Big RAM */ for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE); i++) { u32 addr, len; qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i); addr = BYTES_TO_DWORDS(big_ram->data_reg_addr); len = BRB_REG_BIG_RAM_DATA_SIZE; offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, false, SPLIT_TYPE_NONE, 0); } return offset; } /* Dumps MCP scratchpad. Returns the dumped size in dwords. 
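 * The MCP is halted first (unless DBG_GRC_PARAM_NO_MCP is set) so the
 * scratchpad contents stay consistent. The scratchpad, cpu_reg_file, the MCP
 * block registers and the non-MCP MISC_REG_SHARED_MEM_ADDR register are then
 * dumped, and the MCP is resumed if the halt succeeded.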
*/ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { bool block_enable[MAX_BLOCK_ID] = { 0 }; u32 offset = 0, addr; bool halted = false; /* Halt MCP */ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { halted = !qed_mcp_halt(p_hwfn, p_ptt); if (!halted) DP_NOTICE(p_hwfn, "MCP halt failed!\n"); } /* Dump MCP scratchpad */ offset += qed_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), MCP_REG_SCRATCH_SIZE, false, 0, false, "MCP", 0); /* Dump MCP cpu_reg_file */ offset += qed_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, dump, NULL, BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), MCP_REG_CPU_REG_FILE_SIZE, false, 0, false, "MCP", 0); /* Dump MCP registers */ block_enable[BLOCK_MCP] = true; offset += qed_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, "MCP"); /* Dump required non-MCP registers */ offset += qed_grc_dump_regs_hdr(dump_buf + offset, dump, 1, SPLIT_TYPE_NONE, 0, "MCP"); addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, dump_buf + offset, dump, addr, 1, false, SPLIT_TYPE_NONE, 0); /* Release MCP */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); return offset; } /* Dumps the tbus indirect memory for all PHYs. * Returns the dumped size in dwords. */ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { u32 offset = 0, tbus_lo_offset, tbus_hi_offset; char mem_name[32]; u8 phy_id; for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) { u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr; struct phy_defs *phy_defs; u8 *bytes_buf; phy_defs = &s_phy_defs[phy_id]; addr_lo_addr = phy_defs->base_addr + phy_defs->tbus_addr_lo_addr; addr_hi_addr = phy_defs->base_addr + phy_defs->tbus_addr_hi_addr; data_lo_addr = phy_defs->base_addr + phy_defs->tbus_data_lo_addr; data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr; if (snprintf(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0) DP_NOTICE(p_hwfn, "Unexpected debug error: invalid PHY memory name\n"); offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, mem_name, 0, PHY_DUMP_SIZE_DWORDS, 16, true, mem_name, 0); if (!dump) { offset += PHY_DUMP_SIZE_DWORDS; continue; } bytes_buf = (u8 *)(dump_buf + offset); for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) { qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset); for (tbus_lo_offset = 0; tbus_lo_offset < 256; tbus_lo_offset++) { qed_wr(p_hwfn, p_ptt, addr_lo_addr, tbus_lo_offset); *(bytes_buf++) = (u8)qed_rd(p_hwfn, p_ptt, data_lo_addr); *(bytes_buf++) = (u8)qed_rd(p_hwfn, p_ptt, data_hi_addr); } } offset += PHY_DUMP_SIZE_DWORDS; } return offset; } /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. 
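 * The image is located in NVRAM via qed_find_nvram_image() with
 * NVM_TYPE_HW_DUMP_OUT and copied into the dump buffer with qed_nvram_read();
 * if either step fails, 0 is returned.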
*/ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0; u32 hw_dump_size_dwords = 0, offset = 0; enum dbg_status status; /* Read HW dump image from NVRAM */ status = qed_find_nvram_image(p_hwfn, p_ptt, NVM_TYPE_HW_DUMP_OUT, &hw_dump_offset_bytes, &hw_dump_size_bytes, false); if (status != DBG_STATUS_OK) return 0; hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes); /* Dump HW dump image section */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "mcp_hw_dump", 1); offset += qed_dump_num_param(dump_buf + offset, dump, "size", hw_dump_size_dwords); /* Read MCP HW dump image into dump buffer */ if (dump && hw_dump_size_dwords) { status = qed_nvram_read(p_hwfn, p_ptt, hw_dump_offset_bytes, hw_dump_size_bytes, dump_buf + offset, false); if (status != DBG_STATUS_OK) { DP_NOTICE(p_hwfn, "Failed to read MCP HW Dump image from NVRAM\n"); return 0; } } offset += hw_dump_size_dwords; return offset; } /* Dumps Static Debug data. Returns the dumped size in dwords. */ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 block_id, line_id, offset = 0, addr, len; /* Don't dump static debug if a debug bus recording is in progress */ if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON)) return 0; if (dump) { /* Disable debug bus in all blocks */ qed_bus_disable_blocks(p_hwfn, p_ptt); qed_bus_reset_dbg_block(p_hwfn, p_ptt); qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW); qed_wr(p_hwfn, p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF); qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1); qed_bus_enable_dbg_block(p_hwfn, p_ptt, true); } /* Dump all static debug lines for each relevant block */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { const struct dbg_block_chip *block_per_chip; const struct dbg_block *block; bool is_removed, has_dbg_bus; u16 modes_buf_offset; u32 block_dwords; block_per_chip = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id); is_removed = GET_FIELD(block_per_chip->flags, DBG_BLOCK_CHIP_IS_REMOVED); has_dbg_bus = GET_FIELD(block_per_chip->flags, DBG_BLOCK_CHIP_HAS_DBG_BUS); if (!is_removed && has_dbg_bus && GET_FIELD(block_per_chip->dbg_bus_mode.data, DBG_MODE_HDR_EVAL_MODE) > 0) { modes_buf_offset = GET_FIELD(block_per_chip->dbg_bus_mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); if (!qed_is_mode_match(p_hwfn, &modes_buf_offset)) has_dbg_bus = false; } if (is_removed || !has_dbg_bus) continue; block_dwords = NUM_DBG_LINES(block_per_chip) * STATIC_DEBUG_LINE_DWORDS; /* Dump static section params */ block = get_dbg_block(p_hwfn, (enum block_id)block_id); offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, dump, block->name, 0, block_dwords, 32, false, "STATIC", 0); if (!dump) { offset += block_dwords; continue; } /* If all lines are invalid - dump zeros */ if (dev_data->block_in_reset[block_id]) { memset(dump_buf + offset, 0, DWORDS_TO_BYTES(block_dwords)); offset += block_dwords; continue; } /* Enable block's client */ qed_bus_enable_clients(p_hwfn, p_ptt, BIT(block_per_chip->dbg_client_id)); addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA); len = STATIC_DEBUG_LINE_DWORDS; for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip); line_id++) { /* Configure debug line ID */ qed_bus_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0); /* Read debug line info */ offset += 
qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, len, true, SPLIT_TYPE_NONE, 0); } /* Disable block's client and debug output */ qed_bus_enable_clients(p_hwfn, p_ptt, 0); qed_bus_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, 0, 0, 0, 0, 0); } if (dump) { qed_bus_enable_dbg_block(p_hwfn, p_ptt, false); qed_bus_enable_clients(p_hwfn, p_ptt, 0); } return offset; } /* Performs GRC Dump to the specified buffer. * Returns the dumped size in dwords. */ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; bool parities_masked = false; u32 dwords_read, offset = 0; u8 i; *num_dumped_dwords = 0; dev_data->num_regs_read = 0; /* Update reset state */ if (dump) qed_update_blocks_reset_state(p_hwfn, p_ptt); /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 4); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "grc-dump"); offset += qed_dump_num_param(dump_buf + offset, dump, "num-lcids", NUM_OF_LCIDS); offset += qed_dump_num_param(dump_buf + offset, dump, "num-ltids", NUM_OF_LTIDS); offset += qed_dump_num_param(dump_buf + offset, dump, "num-ports", dev_data->num_ports); /* Dump reset registers (dumped before taking blocks out of reset ) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) offset += qed_grc_dump_reset_regs(p_hwfn, p_ptt, dump_buf + offset, dump); /* Take all blocks out of reset (using reset registers) */ if (dump) { qed_grc_unreset_blocks(p_hwfn, p_ptt, false); qed_update_blocks_reset_state(p_hwfn, p_ptt); } /* Disable all parities using MFW command */ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1); if (!parities_masked) { DP_NOTICE(p_hwfn, "Failed to mask parities using MFW\n"); if (qed_grc_get_param (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE)) return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY; } } /* Dump modified registers (dumped before modifying them) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) offset += qed_grc_dump_modified_regs(p_hwfn, p_ptt, dump_buf + offset, dump); /* Stall storms */ if (dump && (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR) || qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))) qed_grc_stall_storms(p_hwfn, p_ptt, true); /* Dump all regs */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) { bool block_enable[MAX_BLOCK_ID]; /* Dump all blocks except MCP */ for (i = 0; i < MAX_BLOCK_ID; i++) block_enable[i] = true; block_enable[BLOCK_MCP] = false; offset += qed_grc_dump_registers(p_hwfn, p_ptt, dump_buf + offset, dump, block_enable, NULL); /* Dump special registers */ offset += qed_grc_dump_special_regs(p_hwfn, p_ptt, dump_buf + offset, dump); } /* Dump memories */ offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump); /* Dump MCP */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP)) offset += qed_grc_dump_mcp(p_hwfn, p_ptt, dump_buf + offset, dump); /* Dump context */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX)) offset += qed_grc_dump_ctx(p_hwfn, p_ptt, dump_buf + offset, dump); /* Dump RSS memories */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS)) offset += qed_grc_dump_rss(p_hwfn, p_ptt, dump_buf + offset, dump); /* Dump Big RAM */ for (i = 0; i < NUM_BIG_RAM_TYPES; i++) if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param)) offset += qed_grc_dump_big_ram(p_hwfn, p_ptt, 
dump_buf + offset, dump, i); /* Dump VFC */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) { dwords_read = qed_grc_dump_vfc(p_hwfn, p_ptt, dump_buf + offset, dump); offset += dwords_read; if (!dwords_read) return DBG_STATUS_VFC_READ_ERROR; } /* Dump PHY tbus */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC) offset += qed_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump); /* Dump MCP HW Dump */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1) offset += qed_grc_dump_mcp_hw_dump(p_hwfn, p_ptt, dump_buf + offset, dump); /* Dump static debug data (only if not during debug bus recording) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE)) offset += qed_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump); /* Dump last section */ offset += qed_dump_last_section(dump_buf, offset, dump); if (dump) { /* Unstall storms */ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL)) qed_grc_stall_storms(p_hwfn, p_ptt, false); /* Clear parity status */ qed_grc_clear_all_prty(p_hwfn, p_ptt); /* Enable all parities using MFW command */ if (parities_masked) qed_mcp_mask_parities(p_hwfn, p_ptt, 0); } *num_dumped_dwords = offset; return DBG_STATUS_OK; } /* Writes the specified failing Idle Check rule to the specified buffer. * Returns the dumped size in dwords. */ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u16 rule_id, const struct dbg_idle_chk_rule *rule, u16 fail_entry_id, u32 *cond_reg_values) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; const struct dbg_idle_chk_cond_reg *cond_regs; const struct dbg_idle_chk_info_reg *info_regs; u32 i, next_reg_offset = 0, offset = 0; struct dbg_idle_chk_result_hdr *hdr; const union dbg_idle_chk_reg *regs; u8 reg_id; hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; regs = (const union dbg_idle_chk_reg *) p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr + rule->reg_offset; cond_regs = &regs[0].cond_reg; info_regs = &regs[rule->num_cond_regs].info_reg; /* Dump rule data */ if (dump) { memset(hdr, 0, sizeof(*hdr)); hdr->rule_id = rule_id; hdr->mem_entry_id = fail_entry_id; hdr->severity = rule->severity; hdr->num_dumped_cond_regs = rule->num_cond_regs; } offset += IDLE_CHK_RESULT_HDR_DWORDS; /* Dump condition register values */ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id]; struct dbg_idle_chk_result_reg_hdr *reg_hdr; reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset); /* Write register header */ if (!dump) { offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->entry_size; continue; } offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; memset(reg_hdr, 0, sizeof(*reg_hdr)); reg_hdr->start_entry = reg->start_entry; reg_hdr->size = reg->entry_size; SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, reg->num_entries > 1 || reg->start_entry > 0 ? 
1 : 0); SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id); /* Write register values */ for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++) dump_buf[offset] = cond_reg_values[next_reg_offset]; } /* Dump info register values */ for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) { const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id]; u32 block_id; /* Check if register's block is in reset */ if (!dump) { offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size; continue; } block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID); if (block_id >= MAX_BLOCK_ID) { DP_NOTICE(p_hwfn, "Invalid block_id\n"); return 0; } if (!dev_data->block_in_reset[block_id]) { struct dbg_idle_chk_result_reg_hdr *reg_hdr; bool wide_bus, eval_mode, mode_match = true; u16 modes_buf_offset; u32 addr; reg_hdr = (struct dbg_idle_chk_result_reg_hdr *) (dump_buf + offset); /* Check mode */ eval_mode = GET_FIELD(reg->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { modes_buf_offset = GET_FIELD(reg->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } if (!mode_match) continue; addr = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_ADDRESS); wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_WIDE_BUS); /* Write register header */ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; hdr->num_dumped_info_regs++; memset(reg_hdr, 0, sizeof(*reg_hdr)); reg_hdr->size = reg->size; SET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, rule->num_cond_regs + reg_id); /* Write register values */ offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, reg->size, wide_bus, SPLIT_TYPE_NONE, 0); } } return offset; } /* Dumps idle check rule entries. Returns the dumped size in dwords. */ static u32 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, const struct dbg_idle_chk_rule *input_rules, u32 num_input_rules, u32 *num_failing_rules) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE]; u32 i, offset = 0; u16 entry_id; u8 reg_id; *num_failing_rules = 0; for (i = 0; i < num_input_rules; i++) { const struct dbg_idle_chk_cond_reg *cond_regs; const struct dbg_idle_chk_rule *rule; const union dbg_idle_chk_reg *regs; u16 num_reg_entries = 1; bool check_rule = true; const u32 *imm_values; rule = &input_rules[i]; regs = (const union dbg_idle_chk_reg *) p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr + rule->reg_offset; cond_regs = &regs[0].cond_reg; imm_values = (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr + rule->imm_offset; /* Check if all condition register blocks are out of reset, and * find maximal number of entries (all condition registers that * are memories must have the same size, which is > 1). 
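 * When any of the rule's blocks is in reset, the rule is skipped in dump
 * mode; in size-estimation mode (dump == false) the worst case is still
 * accounted for, using one failure record per register entry.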
*/ for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; reg_id++) { u32 block_id = GET_FIELD(cond_regs[reg_id].data, DBG_IDLE_CHK_COND_REG_BLOCK_ID); if (block_id >= MAX_BLOCK_ID) { DP_NOTICE(p_hwfn, "Invalid block_id\n"); return 0; } check_rule = !dev_data->block_in_reset[block_id]; if (cond_regs[reg_id].num_entries > num_reg_entries) num_reg_entries = cond_regs[reg_id].num_entries; } if (!check_rule && dump) continue; if (!dump) { u32 entry_dump_size = qed_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, false, rule->rule_id, rule, 0, NULL); offset += num_reg_entries * entry_dump_size; (*num_failing_rules) += num_reg_entries; continue; } /* Go over all register entries (number of entries is the same * for all condition registers). */ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) { u32 next_reg_offset = 0; /* Read current entry of all condition registers */ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id]; u32 padded_entry_size, addr; bool wide_bus; /* Find GRC address (if it's a memory, the * address of the specific entry is calculated). */ addr = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_ADDRESS); wide_bus = GET_FIELD(reg->data, DBG_IDLE_CHK_COND_REG_WIDE_BUS); if (reg->num_entries > 1 || reg->start_entry > 0) { padded_entry_size = reg->entry_size > 1 ? roundup_pow_of_two(reg->entry_size) : 1; addr += (reg->start_entry + entry_id) * padded_entry_size; } /* Read registers */ if (next_reg_offset + reg->entry_size >= IDLE_CHK_MAX_ENTRIES_SIZE) { DP_NOTICE(p_hwfn, "idle check registers entry is too large\n"); return 0; } next_reg_offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, cond_reg_values + next_reg_offset, dump, addr, reg->entry_size, wide_bus, SPLIT_TYPE_NONE, 0); } /* Call rule condition function. * If returns true, it's a failure. */ if ((*cond_arr[rule->cond_id]) (cond_reg_values, imm_values)) { offset += qed_idle_chk_dump_failure(p_hwfn, p_ptt, dump_buf + offset, dump, rule->rule_id, rule, entry_id, cond_reg_values); (*num_failing_rules)++; } } } return offset; } /* Performs Idle Check Dump to the specified buffer. * Returns the dumped size in dwords. 
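 * The "num_rules" parameter is written as 0 up front and, when dumping, is
 * overwritten with the actual number of failing rules after all rule headers
 * have been processed.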
*/ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct virt_mem_desc *dbg_buf = &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES]; u32 num_failing_rules_offset, offset = 0, input_offset = 0, num_failing_rules = 0; /* Dump global params - 1 must match below amount of params */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "idle-chk"); /* Dump idle check section header with a single parameter */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1); num_failing_rules_offset = offset; offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0); while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) { const struct dbg_idle_chk_cond_hdr *cond_hdr = (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr + input_offset++; bool eval_mode, mode_match = true; u32 curr_failing_rules; u16 modes_buf_offset; /* Check mode */ eval_mode = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0; if (eval_mode) { modes_buf_offset = GET_FIELD(cond_hdr->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); mode_match = qed_is_mode_match(p_hwfn, &modes_buf_offset); } if (mode_match) { const struct dbg_idle_chk_rule *rule = (const struct dbg_idle_chk_rule *)((u32 *) dbg_buf->ptr + input_offset); u32 num_input_rules = cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS; offset += qed_idle_chk_dump_rule_entries(p_hwfn, p_ptt, dump_buf + offset, dump, rule, num_input_rules, &curr_failing_rules); num_failing_rules += curr_failing_rules; } input_offset += cond_hdr->data_size; } /* Overwrite num_rules parameter */ if (dump) qed_dump_num_param(dump_buf + num_failing_rules_offset, dump, "num_rules", num_failing_rules); /* Dump last section */ offset += qed_dump_last_section(dump_buf, offset, dump); return offset; } /* Get info on the MCP Trace data in the scratchpad: * - trace_data_grc_addr (OUT): trace data GRC address in bytes * - trace_data_size (OUT): trace data size in bytes (without the header) */ static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *trace_data_grc_addr, u32 *trace_data_size) { u32 spad_trace_offsize, signature; /* Read trace section offsize structure from MCP scratchpad */ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR); /* Extract trace section address from offsize (in scratchpad) */ *trace_data_grc_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize); /* Read signature from MCP trace section */ signature = qed_rd(p_hwfn, p_ptt, *trace_data_grc_addr + offsetof(struct mcp_trace, signature)); if (signature != MFW_TRACE_SIGNATURE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Read trace size from MCP trace section */ *trace_data_size = qed_rd(p_hwfn, p_ptt, *trace_data_grc_addr + offsetof(struct mcp_trace, size)); return DBG_STATUS_OK; } /* Reads MCP trace meta data image from NVRAM * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file) * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when * loaded from file). * - trace_meta_size (OUT): size in bytes of the trace meta data. 
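 *
 * The running bundle ID is read from the MCP scratchpad at an offset derived
 * from the trace section offsize and the trace data size; IDs greater than 1
 * are rejected, and the ID selects between NVM_TYPE_MFW_TRACE1 and
 * NVM_TYPE_MFW_TRACE2 when locating the meta image in NVRAM.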
*/ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 trace_data_size_bytes, u32 *running_bundle_id, u32 *trace_meta_offset, u32 *trace_meta_size) { u32 spad_trace_offsize, nvram_image_type, running_mfw_addr; /* Read MCP trace section offsize structure from MCP scratchpad */ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR); /* Find running bundle ID */ running_mfw_addr = MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes; *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr); if (*running_bundle_id > 1) return DBG_STATUS_INVALID_NVRAM_BUNDLE; /* Find image in NVRAM */ nvram_image_type = (*running_bundle_id == DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2; return qed_find_nvram_image(p_hwfn, p_ptt, nvram_image_type, trace_meta_offset, trace_meta_size, true); } /* Reads the MCP Trace meta data from NVRAM into the specified buffer */ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 nvram_offset_in_bytes, u32 size_in_bytes, u32 *buf) { u8 modules_num, module_len, i, *byte_buf = (u8 *)buf; enum dbg_status status; u32 signature; /* Read meta data from NVRAM */ status = qed_nvram_read(p_hwfn, p_ptt, nvram_offset_in_bytes, size_in_bytes, buf, true); if (status != DBG_STATUS_OK) return status; /* Extract and check first signature */ signature = qed_read_unaligned_dword(byte_buf); byte_buf += sizeof(signature); if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Extract number of modules */ modules_num = *(byte_buf++); /* Skip all modules */ for (i = 0; i < modules_num; i++) { module_len = *(byte_buf++); byte_buf += module_len; } /* Extract and check second signature */ signature = qed_read_unaligned_dword(byte_buf); byte_buf += sizeof(signature); if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; return DBG_STATUS_OK; } /* Dump MCP Trace */ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0; u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0; enum dbg_status status; int halted = 0; bool use_mfw; *num_dumped_dwords = 0; use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP); /* Get trace data info */ status = qed_mcp_trace_get_data_info(p_hwfn, p_ptt, &trace_data_grc_addr, &trace_data_size_bytes); if (status != DBG_STATUS_OK) return status; /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "mcp-trace"); /* Halt MCP while reading from scratchpad so the read data will be * consistent. if halt fails, MCP trace is taken anyway, with a small * risk that it may be corrupt. 
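 * The halt is only attempted when MCP access is allowed, i.e. when the
 * DBG_GRC_PARAM_NO_MCP debug parameter is not set.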
*/ if (dump && use_mfw) { halted = !qed_mcp_halt(p_hwfn, p_ptt); if (!halted) DP_NOTICE(p_hwfn, "MCP halt failed!\n"); } /* Find trace data size */ trace_data_size_dwords = DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), BYTES_IN_DWORD); /* Dump trace data section header and param */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_data", 1); offset += qed_dump_num_param(dump_buf + offset, dump, "size", trace_data_size_dwords); /* Read trace data from scratchpad into dump buffer */ offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), trace_data_size_dwords, false, SPLIT_TYPE_NONE, 0); /* Resume MCP (only if halt succeeded) */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); /* Dump trace meta section header */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "mcp_trace_meta", 1); /* If MCP Trace meta size parameter was set, use it. * Otherwise, read trace meta. * trace_meta_size_bytes is dword-aligned. */ trace_meta_size_bytes = qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE); if ((!trace_meta_size_bytes || dump) && use_mfw) status = qed_mcp_trace_get_meta_info(p_hwfn, p_ptt, trace_data_size_bytes, &running_bundle_id, &trace_meta_offset_bytes, &trace_meta_size_bytes); if (status == DBG_STATUS_OK) trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes); /* Dump trace meta size param */ offset += qed_dump_num_param(dump_buf + offset, dump, "size", trace_meta_size_dwords); /* Read trace meta image into dump buffer */ if (dump && trace_meta_size_dwords) status = qed_mcp_trace_read_meta(p_hwfn, p_ptt, trace_meta_offset_bytes, trace_meta_size_bytes, dump_buf + offset); if (status == DBG_STATUS_OK) offset += trace_meta_size_dwords; /* Dump last section */ offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; /* If no mcp access, indicate that the dump doesn't contain the meta * data from NVRAM. */ return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED; } /* Dump GRC FIFO */ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { u32 dwords_read, size_param_offset, offset = 0, addr, len; bool fifo_has_data; *num_dumped_dwords = 0; /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "reg-fifo"); /* Dump fifo data section header and param. The size param is 0 for * now, and is overwritten after reading the FIFO. */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "reg_fifo_data", 1); size_param_offset = offset; offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); if (!dump) { /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to * test how much data is available, except for reading it. */ offset += REG_FIFO_DEPTH_DWORDS; goto out; } fifo_has_data = qed_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0; /* Pull available data from fifo. Use DMAE since this is widebus memory * and must be accessed atomically. Test for dwords_read not passing * buffer size since more entries could be added to the buffer as we are * emptying it. 
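 * GRC_REG_TRACE_FIFO_VALID_DATA is re-checked after every element read, and
 * the "size" parameter written earlier is patched with the number of dwords
 * actually pulled from the FIFO.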
*/ addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO); len = REG_FIFO_ELEMENT_DWORDS; for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; dwords_read += REG_FIFO_ELEMENT_DWORDS) { offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, addr, len, true, SPLIT_TYPE_NONE, 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0; } qed_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read); out: /* Dump last section */ offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; return DBG_STATUS_OK; } /* Dump IGU FIFO */ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { u32 dwords_read, size_param_offset, offset = 0, addr, len; bool fifo_has_data; *num_dumped_dwords = 0; /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "igu-fifo"); /* Dump fifo data section header and param. The size param is 0 for * now, and is overwritten after reading the FIFO. */ offset += qed_dump_section_hdr(dump_buf + offset, dump, "igu_fifo_data", 1); size_param_offset = offset; offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); if (!dump) { /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to * test how much data is available, except for reading it. */ offset += IGU_FIFO_DEPTH_DWORDS; goto out; } fifo_has_data = qed_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; /* Pull available data from fifo. Use DMAE since this is widebus memory * and must be accessed atomically. Test for dwords_read not passing * buffer size since more entries could be added to the buffer as we are * emptying it. */ addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY); len = IGU_FIFO_ELEMENT_DWORDS; for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; dwords_read += IGU_FIFO_ELEMENT_DWORDS) { offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, addr, len, true, SPLIT_TYPE_NONE, 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; } qed_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read); out: /* Dump last section */ offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; return DBG_STATUS_OK; } /* Protection Override dump */ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { u32 size_param_offset, override_window_dwords, offset = 0, addr; *num_dumped_dwords = 0; /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "protection-override"); /* Dump data section header and param. The size param is 0 for now, * and is overwritten after reading the data. 
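 * The number of dwords to read is the count of valid override windows
 * (GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) times
 * PROTECTION_OVERRIDE_ELEMENT_DWORDS; when that count is zero, the size
 * parameter is left at 0.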
*/ offset += qed_dump_section_hdr(dump_buf + offset, dump, "protection_override_data", 1); size_param_offset = offset; offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); if (!dump) { offset += PROTECTION_OVERRIDE_DEPTH_DWORDS; goto out; } /* Add override window info to buffer */ override_window_dwords = qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * PROTECTION_OVERRIDE_ELEMENT_DWORDS; if (override_window_dwords) { addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, true, addr, override_window_dwords, true, SPLIT_TYPE_NONE, 0); qed_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords); } out: /* Dump last section */ offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; return DBG_STATUS_OK; } /* Performs FW Asserts Dump to the specified buffer. * Returns the dumped size in dwords. */ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; struct fw_asserts_ram_section *asserts; char storm_letter_str[2] = "?"; struct fw_info fw_info; u32 offset = 0; u8 storm_id; /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 1); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "fw-asserts"); /* Find Storm dump size */ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx; struct storm_defs *storm = &s_storm_defs[storm_id]; u32 last_list_idx, addr; if (dev_data->block_in_reset[storm->sem_block_id]) continue; /* Read FW info for the current Storm */ qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); asserts = &fw_info.fw_asserts_section; /* Dump FW Asserts section header and params */ storm_letter_str[0] = storm->letter; offset += qed_dump_section_hdr(dump_buf + offset, dump, "fw_asserts", 2); offset += qed_dump_str_param(dump_buf + offset, dump, "storm", storm_letter_str); offset += qed_dump_num_param(dump_buf + offset, dump, "size", asserts->list_element_dword_size); /* Read and dump FW Asserts data */ if (!dump) { offset += asserts->list_element_dword_size; continue; } addr = le16_to_cpu(asserts->section_ram_line_offset); fw_asserts_section_addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + RAM_LINES_TO_BYTES(addr); next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset); next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); last_list_idx = (next_list_idx > 0 ? next_list_idx : asserts->list_num_elements) - 1; addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset + last_list_idx * asserts->list_element_dword_size; offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, asserts->list_element_dword_size, false, SPLIT_TYPE_NONE, 0); } /* Dump last section */ offset += qed_dump_last_section(dump_buf, offset, dump); return offset; } /* Dumps the specified ILT pages to the specified buffer. * Returns the dumped size in dwords. 
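 *
 * Pages without a virtual address are skipped. When dump_page_ids is set only
 * the page IDs are written, otherwise the page contents are copied. If the
 * buffer fills up, dumping stops (*dump is cleared) and the actual dumped
 * size is recorded (in the page-memory case the last page may be copied only
 * partially), while the returned offset keeps growing so that the full
 * required size is still reported to the caller.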
*/ static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset, bool *dump, u32 start_page_id, u32 num_pages, struct phys_mem_desc *ilt_pages, bool dump_page_ids, u32 buf_size_in_dwords, u32 *given_actual_dump_size_in_dwords) { u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; u32 page_id, end_page_id, offset = *given_offset; struct phys_mem_desc *mem_desc = NULL; bool continue_dump = *dump; u32 partial_page_size = 0; if (num_pages == 0) return offset; end_page_id = start_page_id + num_pages - 1; for (page_id = start_page_id; page_id <= end_page_id; page_id++) { mem_desc = &ilt_pages[page_id]; if (!ilt_pages[page_id].virt_addr) continue; if (dump_page_ids) { /* Copy page ID to dump buffer * (if dump is needed and buffer is not full) */ if ((continue_dump) && (offset + 1 > buf_size_in_dwords)) { continue_dump = false; actual_dump_size_in_dwords = offset; } if (continue_dump) *(dump_buf + offset) = page_id; offset++; } else { /* Copy page memory to dump buffer */ if ((continue_dump) && (offset + BYTES_TO_DWORDS(mem_desc->size) > buf_size_in_dwords)) { if (offset + BYTES_TO_DWORDS(mem_desc->size) > buf_size_in_dwords) { partial_page_size = buf_size_in_dwords - offset; memcpy(dump_buf + offset, mem_desc->virt_addr, partial_page_size); continue_dump = false; actual_dump_size_in_dwords = offset + partial_page_size; } } if (continue_dump) memcpy(dump_buf + offset, mem_desc->virt_addr, mem_desc->size); offset += BYTES_TO_DWORDS(mem_desc->size); } } *dump = continue_dump; *given_offset = offset; *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; return offset; } /* Dumps a section containing the dumped ILT pages. * Returns the dumped size in dwords. */ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 *given_offset, bool *dump, u32 valid_conn_pf_pages, u32 valid_conn_vf_pages, struct phys_mem_desc *ilt_pages, bool dump_page_ids, u32 buf_size_in_dwords, u32 *given_actual_dump_size_in_dwords) { struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; u32 pf_start_line, start_page_id, offset = *given_offset; u32 cdut_pf_init_pages, cdut_vf_init_pages; u32 cdut_pf_work_pages, cdut_vf_work_pages; u32 base_data_offset, size_param_offset; u32 src_pages; u32 section_header_and_param_size; u32 cdut_pf_pages, cdut_vf_pages; u32 actual_dump_size_in_dwords; bool continue_dump = *dump; bool update_size = *dump; const char *section_name; u32 i; actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; section_name = dump_page_ids ? 
"ilt_page_ids" : "ilt_page_mem"; cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn); cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn); cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn); cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn); cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages; cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages; pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line; section_header_and_param_size = qed_dump_section_hdr(NULL, false, section_name, 1) + qed_dump_num_param(NULL, false, "size", 0); if ((continue_dump) && (offset + section_header_and_param_size > buf_size_in_dwords)) { continue_dump = false; update_size = false; actual_dump_size_in_dwords = offset; } offset += qed_dump_section_hdr(dump_buf + offset, continue_dump, section_name, 1); /* Dump size parameter (0 for now, overwritten with real size later) */ size_param_offset = offset; offset += qed_dump_num_param(dump_buf + offset, continue_dump, "size", 0); base_data_offset = offset; /* CDUC pages are ordered as follows: * - PF pages - valid section (included in PF connection type mapping) * - PF pages - invalid section (not dumped) * - For each VF in the PF: * - VF pages - valid section (included in VF connection type mapping) * - VF pages - invalid section (not dumped) */ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) { /* Dump connection PF pages */ start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line; qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, start_page_id, valid_conn_pf_pages, ilt_pages, dump_page_ids, buf_size_in_dwords, &actual_dump_size_in_dwords); /* Dump connection VF pages */ start_page_id += clients[ILT_CLI_CDUC].pf_total_lines; for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines) qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, start_page_id, valid_conn_vf_pages, ilt_pages, dump_page_ids, buf_size_in_dwords, &actual_dump_size_in_dwords); } /* CDUT pages are ordered as follows: * - PF init pages (not dumped) * - PF work pages * - For each VF in the PF: * - VF init pages (not dumped) * - VF work pages */ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) { /* Dump task PF pages */ start_page_id = clients[ILT_CLI_CDUT].first.val + cdut_pf_init_pages - pf_start_line; qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, start_page_id, cdut_pf_work_pages, ilt_pages, dump_page_ids, buf_size_in_dwords, &actual_dump_size_in_dwords); /* Dump task VF pages */ start_page_id = clients[ILT_CLI_CDUT].first.val + cdut_pf_pages + cdut_vf_init_pages - pf_start_line; for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; i++, start_page_id += cdut_vf_pages) qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, start_page_id, cdut_vf_work_pages, ilt_pages, dump_page_ids, buf_size_in_dwords, &actual_dump_size_in_dwords); } /*Dump Searcher pages */ if (clients[ILT_CLI_SRC].active) { start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line; src_pages = clients[ILT_CLI_SRC].last.val - clients[ILT_CLI_SRC].first.val + 1; qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, start_page_id, src_pages, ilt_pages, dump_page_ids, buf_size_in_dwords, &actual_dump_size_in_dwords); } /* Overwrite size param */ if (update_size) { u32 section_size = (*dump == continue_dump) ? 
offset - base_data_offset : actual_dump_size_in_dwords - base_data_offset; if (section_size > 0) qed_dump_num_param(dump_buf + size_param_offset, *dump, "size", section_size); else if ((section_size == 0) && (*dump != continue_dump)) actual_dump_size_in_dwords -= section_header_and_param_size; } *dump = continue_dump; *given_offset = offset; *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; return offset; } /* Dumps a section containing the global parameters. * Part of ilt dump process * Returns the dumped size in dwords. */ static u32 qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump, u32 cduc_page_size, u32 conn_ctx_size, u32 cdut_page_size, u32 *full_dump_size_param_offset, u32 *actual_dump_size_param_offset) { struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; u32 offset = 0; offset += qed_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, dump, 30); offset += qed_dump_str_param(dump_buf + offset, dump, "dump-type", "ilt-dump"); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-page-size", cduc_page_size); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-first-page-id", clients[ILT_CLI_CDUC].first.val); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-last-page-id", clients[ILT_CLI_CDUC].last.val); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-num-pf-pages", clients[ILT_CLI_CDUC].pf_total_lines); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-num-vf-pages", clients[ILT_CLI_CDUC].vf_total_lines); offset += qed_dump_num_param(dump_buf + offset, dump, "max-conn-ctx-size", conn_ctx_size); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-page-size", cdut_page_size); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-first-page-id", clients[ILT_CLI_CDUT].first.val); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-last-page-id", clients[ILT_CLI_CDUT].last.val); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-num-pf-init-pages", qed_get_cdut_num_pf_init_pages(p_hwfn)); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-num-vf-init-pages", qed_get_cdut_num_vf_init_pages(p_hwfn)); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-num-pf-work-pages", qed_get_cdut_num_pf_work_pages(p_hwfn)); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-num-vf-work-pages", qed_get_cdut_num_vf_work_pages(p_hwfn)); offset += qed_dump_num_param(dump_buf + offset, dump, "max-task-ctx-size", p_hwfn->p_cxt_mngr->task_ctx_size); offset += qed_dump_num_param(dump_buf + offset, dump, "first-vf-id-in-pf", p_hwfn->p_cxt_mngr->first_vf_in_pf); offset += qed_dump_num_param(dump_buf + offset, dump, "num-vfs-in-pf", p_hwfn->p_cxt_mngr->vf_count); offset += qed_dump_num_param(dump_buf + offset, dump, "ptr-size-bytes", sizeof(void *)); offset += qed_dump_num_param(dump_buf + offset, dump, "pf-start-line", p_hwfn->p_cxt_mngr->pf_start_line); offset += qed_dump_num_param(dump_buf + offset, dump, "page-mem-desc-size-dwords", PAGE_MEM_DESC_SIZE_DWORDS); offset += qed_dump_num_param(dump_buf + offset, dump, "ilt-shadow-size", p_hwfn->p_cxt_mngr->ilt_shadow_size); *full_dump_size_param_offset = offset; offset += qed_dump_num_param(dump_buf + offset, dump, "dump-size-full", 0); *actual_dump_size_param_offset = offset; offset += qed_dump_num_param(dump_buf + offset, dump, "dump-size-actual", 0); offset += qed_dump_num_param(dump_buf + offset, dump, "iscsi_task_pages", p_hwfn->p_cxt_mngr->iscsi_task_pages); 
offset += qed_dump_num_param(dump_buf + offset, dump, "fcoe_task_pages", p_hwfn->p_cxt_mngr->fcoe_task_pages); offset += qed_dump_num_param(dump_buf + offset, dump, "roce_task_pages", p_hwfn->p_cxt_mngr->roce_task_pages); offset += qed_dump_num_param(dump_buf + offset, dump, "eth_task_pages", p_hwfn->p_cxt_mngr->eth_task_pages); offset += qed_dump_num_param(dump_buf + offset, dump, "src-first-page-id", clients[ILT_CLI_SRC].first.val); offset += qed_dump_num_param(dump_buf + offset, dump, "src-last-page-id", clients[ILT_CLI_SRC].last.val); offset += qed_dump_num_param(dump_buf + offset, dump, "src-is-active", clients[ILT_CLI_SRC].active); /* Additional/Less parameters require matching of number in call to * dump_common_global_params() */ return offset; } /* Dump section containing number of PF CIDs per connection type. * Part of ilt dump process. * Returns the dumped size in dwords. */ static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *valid_conn_pf_cids) { u32 num_pf_cids = 0; u32 offset = 0; u8 conn_type; offset += qed_dump_section_hdr(dump_buf + offset, dump, "num_pf_cids_per_conn_type", 1); offset += qed_dump_num_param(dump_buf + offset, dump, "size", NUM_OF_CONNECTION_TYPES); for (conn_type = 0, *valid_conn_pf_cids = 0; conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count; if (dump) *(dump_buf + offset) = num_pf_cids; *valid_conn_pf_cids += num_pf_cids; } return offset; } /* Dump section containing number of VF CIDs per connection type * Part of ilt dump process. * Returns the dumped size in dwords. */ static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *valid_conn_vf_cids) { u32 num_vf_cids = 0; u32 offset = 0; u8 conn_type; offset += qed_dump_section_hdr(dump_buf + offset, dump, "num_vf_cids_per_conn_type", 1); offset += qed_dump_num_param(dump_buf + offset, dump, "size", NUM_OF_CONNECTION_TYPES); for (conn_type = 0, *valid_conn_vf_cids = 0; conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { num_vf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf; if (dump) *(dump_buf + offset) = num_vf_cids; *valid_conn_vf_cids += num_vf_cids; } return offset; } /* Performs ILT Dump to the specified buffer. * buf_size_in_dwords - The dumped buffer size. * Returns the dumped size in dwords. 
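* Note (based on the code below): if the buffer cannot hold the full dump, dumping stops early; both the size a complete dump would require and the size actually written are recorded as global parameters, so the caller can detect truncation.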
*/ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size_in_dwords, bool dump) { #if ((!defined VMWARE) && (!defined UEFI)) struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; #endif u32 valid_conn_vf_cids = 0, valid_conn_vf_pages, offset = 0, real_dumped_size = 0; u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages; u32 num_cids_per_page, conn_ctx_size; u32 cduc_page_size, cdut_page_size; u32 actual_dump_size_in_dwords = 0; struct phys_mem_desc *ilt_pages; u32 actul_dump_off = 0; u32 last_section_size; u32 full_dump_off = 0; u32 section_size = 0; bool continue_dump; u32 page_id; last_section_size = qed_dump_last_section(NULL, 0, false); cduc_page_size = 1 << (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); cdut_page_size = 1 << (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size; num_cids_per_page = (int)(cduc_page_size / conn_ctx_size); ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow; continue_dump = dump; /* if need to dump then save memory for the last section * (last section calculates CRC of dumped data) */ if (dump) { if (buf_size_in_dwords >= last_section_size) { buf_size_in_dwords -= last_section_size; } else { continue_dump = false; actual_dump_size_in_dwords = offset; } } /* Dump global params */ /* if need to dump then first check that there is enough memory * in dumped buffer for this section calculate the size of this * section without dumping. if there is not enough memory - then * stop the dumping. */ if (continue_dump) { section_size = qed_ilt_dump_dump_common_global_params(p_hwfn, p_ptt, NULL, false, cduc_page_size, conn_ctx_size, cdut_page_size, &full_dump_off, &actul_dump_off); if (offset + section_size > buf_size_in_dwords) { continue_dump = false; actual_dump_size_in_dwords = offset; } } offset += qed_ilt_dump_dump_common_global_params(p_hwfn, p_ptt, dump_buf + offset, continue_dump, cduc_page_size, conn_ctx_size, cdut_page_size, &full_dump_off, &actul_dump_off); /* Dump section containing number of PF CIDs per connection type * If need to dump then first check that there is enough memory in * dumped buffer for this section. */ if (continue_dump) { section_size = qed_ilt_dump_dump_num_pf_cids(p_hwfn, NULL, false, &valid_conn_pf_cids); if (offset + section_size > buf_size_in_dwords) { continue_dump = false; actual_dump_size_in_dwords = offset; } } offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn, dump_buf + offset, continue_dump, &valid_conn_pf_cids); /* Dump section containing number of VF CIDs per connection type * If need to dump then first check that there is enough memory in * dumped buffer for this section. */ if (continue_dump) { section_size = qed_ilt_dump_dump_num_vf_cids(p_hwfn, NULL, false, &valid_conn_vf_cids); if (offset + section_size > buf_size_in_dwords) { continue_dump = false; actual_dump_size_in_dwords = offset; } } offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn, dump_buf + offset, continue_dump, &valid_conn_vf_cids); /* Dump section containing physical memory descriptors for each * ILT page. */ num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size; /* If need to dump then first check that there is enough memory * in dumped buffer for the section header. 
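* (here, the "ilt_page_desc" section name plus its "size" parameter).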
*/ if (continue_dump) { section_size = qed_dump_section_hdr(NULL, false, "ilt_page_desc", 1) + qed_dump_num_param(NULL, false, "size", num_pages * PAGE_MEM_DESC_SIZE_DWORDS); if (offset + section_size > buf_size_in_dwords) { continue_dump = false; actual_dump_size_in_dwords = offset; } } offset += qed_dump_section_hdr(dump_buf + offset, continue_dump, "ilt_page_desc", 1); offset += qed_dump_num_param(dump_buf + offset, continue_dump, "size", num_pages * PAGE_MEM_DESC_SIZE_DWORDS); /* Copy memory descriptors to dump buffer * If need to dump then dump till the dump buffer size */ if (continue_dump) { for (page_id = 0; page_id < num_pages; page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) { if (continue_dump && (offset + PAGE_MEM_DESC_SIZE_DWORDS <= buf_size_in_dwords)) { memcpy(dump_buf + offset, &ilt_pages[page_id], DWORDS_TO_BYTES (PAGE_MEM_DESC_SIZE_DWORDS)); } else { if (continue_dump) { continue_dump = false; actual_dump_size_in_dwords = offset; } } } } else { offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS; } valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids, num_cids_per_page); valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids, num_cids_per_page); /* Dump ILT pages IDs */ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, valid_conn_pf_pages, valid_conn_vf_pages, ilt_pages, true, buf_size_in_dwords, &actual_dump_size_in_dwords); /* Dump ILT pages memory */ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, valid_conn_pf_pages, valid_conn_vf_pages, ilt_pages, false, buf_size_in_dwords, &actual_dump_size_in_dwords); real_dumped_size = (continue_dump == dump) ? offset : actual_dump_size_in_dwords; qed_dump_num_param(dump_buf + full_dump_off, dump, "full-dump-size", offset + last_section_size); qed_dump_num_param(dump_buf + actul_dump_off, dump, "actual-dump-size", real_dumped_size + last_section_size); /* Dump last section */ real_dumped_size += qed_dump_last_section(dump_buf, real_dumped_size, dump); return real_dumped_size; } /***************************** Public Functions *******************************/ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, const u8 * const bin_ptr) { struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr; u8 buf_id; /* Convert binary data to debug arrays */ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) qed_set_dbg_bin_buf(p_hwfn, buf_id, (u32 *)(bin_ptr + buf_hdrs[buf_id].offset), buf_hdrs[buf_id].length); return DBG_STATUS_OK; } static enum dbg_status qed_dbg_set_app_ver(u32 ver) { if (ver < TOOLS_VERSION) return DBG_STATUS_UNSUPPORTED_APP_VERSION; s_app_ver = ver; return DBG_STATUS_OK; } bool qed_read_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct fw_info *fw_info) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u8 storm_id; for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { struct storm_defs *storm = &s_storm_defs[storm_id]; /* Skip Storm if it's in reset */ if (dev_data->block_in_reset[storm->sem_block_id]) continue; /* Read FW info for the current Storm */ qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info); return true; } return false; } enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, enum dbg_grc_params grc_param, u32 val) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; enum dbg_status status; int i; DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val); status = qed_dbg_dev_init(p_hwfn); if (status != DBG_STATUS_OK) return status; /* Initializes the GRC parameters (if not 
initialized). Needed in order * to set the default parameter values for the first time. */ qed_dbg_grc_init_params(p_hwfn); if (grc_param >= MAX_DBG_GRC_PARAMS) return DBG_STATUS_INVALID_ARGS; if (val < s_grc_param_defs[grc_param].min || val > s_grc_param_defs[grc_param].max) return DBG_STATUS_INVALID_ARGS; if (s_grc_param_defs[grc_param].is_preset) { /* Preset param */ /* Disabling a preset is not allowed. Call * dbg_grc_set_params_default instead. */ if (!val) return DBG_STATUS_INVALID_ARGS; /* Update all params with the preset values */ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) { struct grc_param_defs *defs = &s_grc_param_defs[i]; u32 preset_val; /* Skip persistent params */ if (defs->is_persistent) continue; /* Find preset value */ if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL) preset_val = defs->exclude_all_preset_val; else if (grc_param == DBG_GRC_PARAM_CRASH) preset_val = defs->crash_preset_val[dev_data->chip_id]; else return DBG_STATUS_INVALID_ARGS; qed_grc_set_param(p_hwfn, i, preset_val); } } else { /* Regular param - set its value */ qed_grc_set_param(p_hwfn, grc_param, val); } return DBG_STATUS_OK; } /* Assign default GRC param values */ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 i; for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) if (!s_grc_param_defs[i].is_persistent) dev_data->grc.param_val[i] = s_grc_param_defs[i].default_val[dev_data->chip_id]; } enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; if (status != DBG_STATUS_OK) return status; if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size); } enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { u32 needed_buf_size_in_dwords; enum dbg_status status; *num_dumped_dwords = 0; status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; /* Doesn't do anything, needed for compile time asserts */ qed_static_asserts(); /* GRC Dump */ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); /* Revert GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); return status; } enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; struct idle_chk_data *idle_chk = &dev_data->idle_chk; enum dbg_status status; *buf_size = 0; status = qed_dbg_dev_init(p_hwfn); if (status != DBG_STATUS_OK) return status; if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; if (!idle_chk->buf_size_set) { idle_chk->buf_size = qed_idle_chk_dump(p_hwfn, p_ptt, NULL, false); idle_chk->buf_size_set = true; } *buf_size = idle_chk->buf_size; return DBG_STATUS_OK; } enum dbg_status 
qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { u32 needed_buf_size_in_dwords; enum dbg_status status; *num_dumped_dwords = 0; status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; /* Update reset state */ qed_grc_unreset_blocks(p_hwfn, p_ptt, true); qed_update_blocks_reset_state(p_hwfn, p_ptt); /* Idle Check Dump */ *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true); /* Revert GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); return DBG_STATUS_OK; } enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; if (status != DBG_STATUS_OK) return status; return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size); } enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { u32 needed_buf_size_in_dwords; enum dbg_status status; status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK && status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED) return status; if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); /* Perform dump */ status = qed_mcp_trace_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); /* Revert GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); return status; } enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; if (status != DBG_STATUS_OK) return status; return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); } enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { u32 needed_buf_size_in_dwords; enum dbg_status status; *num_dumped_dwords = 0; status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); status = qed_reg_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); /* Revert GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); return status; } enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; if (status != DBG_STATUS_OK) return status; return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); } enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { u32 needed_buf_size_in_dwords; enum dbg_status status; *num_dumped_dwords = 0; status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &needed_buf_size_in_dwords); if (status != DBG_STATUS_OK) return status; if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; /* Update reset state */ 
qed_update_blocks_reset_state(p_hwfn, p_ptt); status = qed_igu_fifo_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); /* Revert GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); return status; } enum dbg_status qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; if (status != DBG_STATUS_OK) return status; return qed_protection_override_dump(p_hwfn, p_ptt, NULL, false, buf_size); } enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords; enum dbg_status status; *num_dumped_dwords = 0; status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, p_size); if (status != DBG_STATUS_OK) return status; if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); status = qed_protection_override_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); /* Revert GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); return status; } enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; if (status != DBG_STATUS_OK) return status; /* Update reset state */ qed_update_blocks_reset_state(p_hwfn, p_ptt); *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false); return DBG_STATUS_OK; } enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords; enum dbg_status status; *num_dumped_dwords = 0; status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, p_size); if (status != DBG_STATUS_OK) return status; if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true); /* Revert GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); return DBG_STATUS_OK; } static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size) { enum dbg_status status = qed_dbg_dev_init(p_hwfn); *buf_size = 0; if (status != DBG_STATUS_OK) return status; *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false); return DBG_STATUS_OK; } static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, buf_size_in_dwords, true); /* Reveret GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); return DBG_STATUS_OK; } enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum block_id block_id, enum dbg_attn_type attn_type, bool clear_status, struct dbg_attn_block_result *results) { enum dbg_status status = qed_dbg_dev_init(p_hwfn); u8 reg_idx, num_attn_regs, num_result_regs = 0; const struct dbg_attn_reg *attn_reg_arr; if (status != DBG_STATUS_OK) return status; if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; 
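/* Descriptive note (not part of the original source): each entry added to results->reg_results below records the status register address, the number of attention bits it holds, the raw status value and the mask value, so a later parsing stage can map them to attention names via results->names_offset. */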
attn_reg_arr = qed_get_block_attn_regs(p_hwfn, block_id, attn_type, &num_attn_regs); for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx]; struct dbg_attn_reg_result *reg_result; u32 sts_addr, sts_val; u16 modes_buf_offset; bool eval_mode; /* Check mode */ eval_mode = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_EVAL_MODE) > 0; modes_buf_offset = GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset)) continue; /* Mode match - read attention status register */ sts_addr = DWORDS_TO_BYTES(clear_status ? reg_data->sts_clr_address : GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS)); sts_val = qed_rd(p_hwfn, p_ptt, sts_addr); if (!sts_val) continue; /* Non-zero attention status - add to results */ reg_result = &results->reg_results[num_result_regs]; SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr); SET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN, GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN)); reg_result->block_attn_offset = reg_data->block_attn_offset; reg_result->sts_val = sts_val; reg_result->mask_val = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES (reg_data->mask_address)); num_result_regs++; } results->block_id = (u8)block_id; results->names_offset = qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset; SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type); SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs); return DBG_STATUS_OK; } /******************************* Data Types **********************************/ /* REG fifo element */ struct reg_fifo_element { u64 data; #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1 #define REG_FIFO_ELEMENT_PF_SHIFT 24 #define REG_FIFO_ELEMENT_PF_MASK 0xf #define REG_FIFO_ELEMENT_VF_SHIFT 28 #define REG_FIFO_ELEMENT_VF_MASK 0xff #define REG_FIFO_ELEMENT_PORT_SHIFT 36 #define REG_FIFO_ELEMENT_PORT_MASK 0x3 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf #define REG_FIFO_ELEMENT_ERROR_SHIFT 47 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f }; /* REG fifo error element */ struct reg_fifo_err { u32 err_code; const char *err_msg; }; /* IGU fifo element */ struct igu_fifo_element { u32 dword0; #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff u32 dword1; u32 dword2; #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff u32 reserved; }; struct igu_fifo_wr_data { u32 data; #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 
0x1 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1 }; struct igu_fifo_cleanup_wr_data { u32 data; #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1 }; /* Protection override element */ struct protection_override_element { u64 data; #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7 }; enum igu_fifo_sources { IGU_SRC_PXP0, IGU_SRC_PXP1, IGU_SRC_PXP2, IGU_SRC_PXP3, IGU_SRC_PXP4, IGU_SRC_PXP5, IGU_SRC_PXP6, IGU_SRC_PXP7, IGU_SRC_CAU, IGU_SRC_ATTN, IGU_SRC_GRC }; enum igu_fifo_addr_types { IGU_ADDR_TYPE_MSIX_MEM, IGU_ADDR_TYPE_WRITE_PBA, IGU_ADDR_TYPE_WRITE_INT_ACK, IGU_ADDR_TYPE_WRITE_ATTN_BITS, IGU_ADDR_TYPE_READ_INT, IGU_ADDR_TYPE_WRITE_PROD_UPDATE, IGU_ADDR_TYPE_RESERVED }; struct igu_fifo_addr_data { u16 start_addr; u16 end_addr; char *desc; char *vf_desc; enum igu_fifo_addr_types type; }; /******************************** Constants **********************************/ #define MAX_MSG_LEN 1024 #define MCP_TRACE_MAX_MODULE_LEN 8 #define MCP_TRACE_FORMAT_MAX_PARAMS 3 #define MCP_TRACE_FORMAT_PARAM_WIDTH \ (MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET) #define REG_FIFO_ELEMENT_ADDR_FACTOR 4 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4 /***************************** Constant Arrays *******************************/ /* Status string array */ static const char * const s_status_str[] = { /* DBG_STATUS_OK */ "Operation completed successfully", /* DBG_STATUS_APP_VERSION_NOT_SET */ "Debug application version wasn't set", /* DBG_STATUS_UNSUPPORTED_APP_VERSION */ "Unsupported debug application version", /* DBG_STATUS_DBG_BLOCK_NOT_RESET */ "The debug block wasn't reset since the last recording", /* DBG_STATUS_INVALID_ARGS */ "Invalid arguments", /* DBG_STATUS_OUTPUT_ALREADY_SET */ "The debug output was already set", /* DBG_STATUS_INVALID_PCI_BUF_SIZE */ "Invalid PCI buffer size", /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */ "PCI buffer allocation failed", /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */ "A PCI buffer wasn't allocated", /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */ "The filter/trigger constraint dword offsets are not enabled for recording", /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */ "No matching framing mode", /* DBG_STATUS_VFC_READ_ERROR */ 
"Error reading from VFC", /* DBG_STATUS_STORM_ALREADY_ENABLED */ "The Storm was already enabled", /* DBG_STATUS_STORM_NOT_ENABLED */ "The specified Storm wasn't enabled", /* DBG_STATUS_BLOCK_ALREADY_ENABLED */ "The block was already enabled", /* DBG_STATUS_BLOCK_NOT_ENABLED */ "The specified block wasn't enabled", /* DBG_STATUS_NO_INPUT_ENABLED */ "No input was enabled for recording", /* DBG_STATUS_NO_FILTER_TRIGGER_256B */ "Filters and triggers are not allowed in E4 256-bit mode", /* DBG_STATUS_FILTER_ALREADY_ENABLED */ "The filter was already enabled", /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */ "The trigger was already enabled", /* DBG_STATUS_TRIGGER_NOT_ENABLED */ "The trigger wasn't enabled", /* DBG_STATUS_CANT_ADD_CONSTRAINT */ "A constraint can be added only after a filter was enabled or a trigger state was added", /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */ "Cannot add more than 3 trigger states", /* DBG_STATUS_TOO_MANY_CONSTRAINTS */ "Cannot add more than 4 constraints per filter or trigger state", /* DBG_STATUS_RECORDING_NOT_STARTED */ "The recording wasn't started", /* DBG_STATUS_DATA_DIDNT_TRIGGER */ "A trigger was configured, but it didn't trigger", /* DBG_STATUS_NO_DATA_RECORDED */ "No data was recorded", /* DBG_STATUS_DUMP_BUF_TOO_SMALL */ "Dump buffer is too small", /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */ "Dumped data is not aligned to chunks", /* DBG_STATUS_UNKNOWN_CHIP */ "Unknown chip", /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */ "Failed allocating virtual memory", /* DBG_STATUS_BLOCK_IN_RESET */ "The input block is in reset", /* DBG_STATUS_INVALID_TRACE_SIGNATURE */ "Invalid MCP trace signature found in NVRAM", /* DBG_STATUS_INVALID_NVRAM_BUNDLE */ "Invalid bundle ID found in NVRAM", /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */ "Failed getting NVRAM image", /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */ "NVRAM image is not dword-aligned", /* DBG_STATUS_NVRAM_READ_FAILED */ "Failed reading from NVRAM", /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */ "Idle check parsing failed", /* DBG_STATUS_MCP_TRACE_BAD_DATA */ "MCP Trace data is corrupt", /* DBG_STATUS_MCP_TRACE_NO_META */ "Dump doesn't contain meta data - it must be provided in image file", /* DBG_STATUS_MCP_COULD_NOT_HALT */ "Failed to halt MCP", /* DBG_STATUS_MCP_COULD_NOT_RESUME */ "Failed to resume MCP after halt", /* DBG_STATUS_RESERVED0 */ "", /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */ "Failed to empty SEMI sync FIFO", /* DBG_STATUS_IGU_FIFO_BAD_DATA */ "IGU FIFO data is corrupt", /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */ "MCP failed to mask parities", /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */ "FW Asserts parsing failed", /* DBG_STATUS_REG_FIFO_BAD_DATA */ "GRC FIFO data is corrupt", /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */ "Protection Override data is corrupt", /* DBG_STATUS_DBG_ARRAY_NOT_SET */ "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)", /* DBG_STATUS_RESERVED1 */ "", /* DBG_STATUS_NON_MATCHING_LINES */ "Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)", /* DBG_STATUS_INSUFFICIENT_HW_IDS */ "Insufficient HW IDs. 
Try to record less Storms/blocks", /* DBG_STATUS_DBG_BUS_IN_USE */ "The debug bus is in use", /* DBG_STATUS_INVALID_STORM_DBG_MODE */ "The storm debug mode is not supported in the current chip", /* DBG_STATUS_OTHER_ENGINE_BB_ONLY */ "Other engine is supported only in BB", /* DBG_STATUS_FILTER_SINGLE_HW_ID */ "The configured filter mode requires a single Storm/block input", /* DBG_STATUS_TRIGGER_SINGLE_HW_ID */ "The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input", /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */ "When triggering on Storm data, the Storm to trigger on must be specified", /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */ "Failed to request MDUMP2 Offsize", /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */ "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data", /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */ "Invalid Signature found at start of MDUMP2", /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */ "Invalid Log Size of MDUMP2", /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */ "Invalid Log Header of MDUMP2", /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */ "Invalid Log Data of MDUMP2", /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */ "Could not extract number of ports from regval buf of MDUMP2", /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */ "Could not extract MFW (link) status from regval buf of MDUMP2", /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */ "Could not display linkdump of MDUMP2", /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */ "Could not read PHY CFG of MDUMP2", /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */ "Could not read PLL Mode of MDUMP2", /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */ "Could not read TSCF/TSCE Lane Regs of MDUMP2", /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */ "Could not allocate MDUMP2 reg-val internal buffer" }; /* Idle check severity names array */ static const char * const s_idle_chk_severity_str[] = { "Error", "Error if no traffic", "Warning" }; /* MCP Trace level names array */ static const char * const s_mcp_trace_level_str[] = { "ERROR", "TRACE", "DEBUG" }; /* Access type names array */ static const char * const s_access_strs[] = { "read", "write" }; /* Privilege type names array */ static const char * const s_privilege_strs[] = { "VF", "PDA", "HV", "UA" }; /* Protection type names array */ static const char * const s_protection_strs[] = { "(default)", "(default)", "(default)", "(default)", "override VF", "override PDA", "override HV", "override UA" }; /* Master type names array */ static const char * const s_master_strs[] = { "???", "pxp", "mcp", "msdm", "psdm", "ysdm", "usdm", "tsdm", "xsdm", "dbu", "dmae", "jdap", "???", "???", "???", "???" 
}; /* REG FIFO error messages array */ static struct reg_fifo_err s_reg_fifo_errors[] = { {1, "grc timeout"}, {2, "address doesn't belong to any block"}, {4, "reserved address in block or write to read-only address"}, {8, "privilege/protection mismatch"}, {16, "path isolation error"}, {17, "RSL error"} }; /* IGU FIFO sources array */ static const char * const s_igu_fifo_source_strs[] = { "TSTORM", "MSTORM", "USTORM", "XSTORM", "YSTORM", "PSTORM", "PCIE", "NIG_QM_PBF", "CAU", "ATTN", "GRC", }; /* IGU FIFO error messages */ static const char * const s_igu_fifo_error_strs[] = { "no error", "length error", "function disabled", "VF sent command to attention address", "host sent prod update command", "read of during interrupt register while in MIMD mode", "access to PXP BAR reserved address", "producer update command to attention index", "unknown error", "SB index not valid", "SB relative index and FID not found", "FID not match", "command with error flag asserted (PCI error or CAU discard)", "VF sent cleanup and RF cleanup is disabled", "cleanup command on type bigger than 4" }; /* IGU FIFO address data */ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM}, {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA}, {0x201, 0x201, "Write PBA[64:127]", "reserved", IGU_ADDR_TYPE_WRITE_PBA}, {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA}, {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, {0x400, 0x5ef, "Write interrupt acknowledgment", NULL, IGU_ADDR_TYPE_WRITE_INT_ACK}, {0x5f0, 0x5f0, "Attention bits update", NULL, IGU_ADDR_TYPE_WRITE_ATTN_BITS}, {0x5f1, 0x5f1, "Attention bits set", NULL, IGU_ADDR_TYPE_WRITE_ATTN_BITS}, {0x5f2, 0x5f2, "Attention bits clear", NULL, IGU_ADDR_TYPE_WRITE_ATTN_BITS}, {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL, IGU_ADDR_TYPE_READ_INT}, {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL, IGU_ADDR_TYPE_READ_INT}, {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL, IGU_ADDR_TYPE_READ_INT}, {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL, IGU_ADDR_TYPE_READ_INT}, {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE} }; /******************************** Variables **********************************/ /* Temporary buffer, used for print size calculations */ static char s_temp_buf[MAX_MSG_LEN]; /**************************** Private Functions ******************************/ static void qed_user_static_asserts(void) { } static u32 qed_cyclic_add(u32 a, u32 b, u32 size) { return (a + b) % size; } static u32 qed_cyclic_sub(u32 a, u32 b, u32 size) { return (size + a - b) % size; } /* Reads the specified number of bytes from the specified cyclic buffer (up to 4 * bytes) and returns them as a dword value. the specified buffer offset is * updated. */ static u32 qed_read_from_cyclic_buf(void *buf, u32 *offset, u32 buf_size, u8 num_bytes_to_read) { u8 i, *val_ptr, *bytes_buf = (u8 *)buf; u32 val = 0; val_ptr = (u8 *)&val; /* Assume running on a LITTLE ENDIAN and the buffer is network order * (BIG ENDIAN), as high order bytes are placed in lower memory address. */ for (i = 0; i < num_bytes_to_read; i++) { val_ptr[i] = bytes_buf[*offset]; *offset = qed_cyclic_add(*offset, 1, buf_size); } return val; } /* Reads and returns the next byte from the specified buffer. * The specified buffer offset is updated. 
*/ static u8 qed_read_byte_from_buf(void *buf, u32 *offset) { return ((u8 *)buf)[(*offset)++]; } /* Reads and returns the next dword from the specified buffer. * The specified buffer offset is updated. */ static u32 qed_read_dword_from_buf(void *buf, u32 *offset) { u32 dword_val = *(u32 *)&((u8 *)buf)[*offset]; *offset += 4; return dword_val; } /* Reads the next string from the specified buffer, and copies it to the * specified pointer. The specified buffer offset is updated. */ static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest) { const char *source_str = &((const char *)buf)[*offset]; strncpy(dest, source_str, size); dest[size - 1] = '\0'; *offset += size; } /* Returns a pointer to the specified offset (in bytes) of the specified buffer. * If the specified buffer is NULL, a temporary buffer pointer is returned. */ static char *qed_get_buf_ptr(void *buf, u32 offset) { return buf ? (char *)buf + offset : s_temp_buf; } /* Reads a param from the specified buffer. Returns the number of dwords read. * If the returned str_param is NULL, the param is numeric and its value is * returned in num_param. * Otherwise, the param is a string and its pointer is returned in str_param. */ static u32 qed_read_param(u32 *dump_buf, const char **param_name, const char **param_str_val, u32 *param_num_val) { char *char_buf = (char *)dump_buf; size_t offset = 0; /* Extract param name */ *param_name = char_buf; offset += strlen(*param_name) + 1; /* Check param type */ if (*(char_buf + offset++)) { /* String param */ *param_str_val = char_buf + offset; *param_num_val = 0; offset += strlen(*param_str_val) + 1; if (offset & 0x3) offset += (4 - (offset & 0x3)); } else { /* Numeric param */ *param_str_val = NULL; if (offset & 0x3) offset += (4 - (offset & 0x3)); *param_num_val = *(u32 *)(char_buf + offset); offset += 4; } return (u32)offset / 4; } /* Reads a section header from the specified buffer. * Returns the number of dwords read. */ static u32 qed_read_section_hdr(u32 *dump_buf, const char **section_name, u32 *num_section_params) { const char *param_str_val; return qed_read_param(dump_buf, section_name, &param_str_val, num_section_params); } /* Reads section params from the specified buffer and prints them to the results * buffer. Returns the number of dwords read. */ static u32 qed_print_section_params(u32 *dump_buf, u32 num_section_params, char *results_buf, u32 *num_chars_printed) { u32 i, dump_offset = 0, results_offset = 0; for (i = 0; i < num_section_params; i++) { const char *param_name, *param_str_val; u32 param_num_val = 0; dump_offset += qed_read_param(dump_buf + dump_offset, &param_name, &param_str_val, &param_num_val); if (param_str_val) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "%s: %s\n", param_name, param_str_val); else if (strcmp(param_name, "fw-timestamp")) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "%s: %d\n", param_name, param_num_val); } results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n"); *num_chars_printed = results_offset; return dump_offset; } /* Returns the block name that matches the specified block ID, * or NULL if not found.
*/ static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn, enum block_id block_id) { const struct dbg_block_user *block = (const struct dbg_block_user *) p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id; return (const char *)block->name; } static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn *p_hwfn) { return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info; } /* Parses the idle check rules and returns the number of characters printed. * In case of parsing error, returns 0. */ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 *dump_buf_end, u32 num_rules, bool print_fw_idle_chk, char *results_buf, u32 *num_errors, u32 *num_warnings) { /* Offset in results_buf in bytes */ u32 results_offset = 0; u32 rule_idx; u16 i, j; *num_errors = 0; *num_warnings = 0; /* Go over dumped results */ for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end; rule_idx++) { const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data; struct dbg_idle_chk_result_hdr *hdr; const char *parsing_str, *lsi_msg; u32 parsing_str_offset; bool has_fw_msg; u8 curr_reg_id; hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; rule_parsing_data = (const struct dbg_idle_chk_rule_parsing_data *) p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr + hdr->rule_id; parsing_str_offset = GET_FIELD(rule_parsing_data->data, DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET); has_fw_msg = GET_FIELD(rule_parsing_data->data, DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0; parsing_str = (const char *) p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr + parsing_str_offset; lsi_msg = parsing_str; curr_reg_id = 0; if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES) return 0; /* Skip rule header */ dump_buf += BYTES_TO_DWORDS(sizeof(*hdr)); /* Update errors/warnings count */ if (hdr->severity == IDLE_CHK_SEVERITY_ERROR || hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC) (*num_errors)++; else (*num_warnings)++; /* Print rule severity */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "%s: ", s_idle_chk_severity_str[hdr->severity]); /* Print rule message */ if (has_fw_msg) parsing_str += strlen(parsing_str) + 1; results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "%s.", has_fw_msg && print_fw_idle_chk ? parsing_str : lsi_msg); parsing_str += strlen(parsing_str) + 1; /* Print register values */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), " Registers:"); for (i = 0; i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs; i++) { struct dbg_idle_chk_result_reg_hdr *reg_hdr; bool is_mem; u8 reg_id; reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)dump_buf; is_mem = GET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM); reg_id = GET_FIELD(reg_hdr->data, DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID); /* Skip reg header */ dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr)); /* Skip register names until the required reg_id is * reached. 
*/ for (; reg_id > curr_reg_id; curr_reg_id++) parsing_str += strlen(parsing_str) + 1; results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), " %s", parsing_str); if (i < hdr->num_dumped_cond_regs && is_mem) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "[%d]", hdr->mem_entry_id + reg_hdr->start_entry); results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "="); for (j = 0; j < reg_hdr->size; j++, dump_buf++) { results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "0x%x", *dump_buf); if (j < reg_hdr->size - 1) results_offset += sprintf(qed_get_buf_ptr (results_buf, results_offset), ","); } } results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n"); } /* Check if end of dump buffer was exceeded */ if (dump_buf > dump_buf_end) return 0; return results_offset; } /* Parses an idle check dump buffer. * If result_buf is not NULL, the idle check results are printed to it. * In any case, the required results buffer size is assigned to * parsed_results_bytes. * The parsing status is returned. */ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf, u32 *parsed_results_bytes, u32 *num_errors, u32 *num_warnings) { u32 num_section_params = 0, num_rules, num_rules_not_dumped; const char *section_name, *param_name, *param_str_val; u32 *dump_buf_end = dump_buf + num_dumped_dwords; /* Offset in results_buf in bytes */ u32 results_offset = 0; *parsed_results_bytes = 0; *num_errors = 0; *num_warnings = 0; if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "global_params")) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, results_buf, &results_offset); /* Read idle_chk section * There may be 1 or 2 idle_chk section parameters: * - 1st is "num_rules" * - 2nd is "num_rules_not_dumped" (optional) */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "idle_chk") || (num_section_params != 2 && num_section_params != 1)) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &num_rules); if (strcmp(param_name, "num_rules")) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; if (num_section_params > 1) { dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &num_rules_not_dumped); if (strcmp(param_name, "num_rules_not_dumped")) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; } else { num_rules_not_dumped = 0; } if (num_rules) { u32 rules_print_size; /* Print FW output */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "FW_IDLE_CHECK:\n"); rules_print_size = qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf, dump_buf_end, num_rules, true, results_buf ? results_buf + results_offset : NULL, num_errors, num_warnings); results_offset += rules_print_size; if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; /* Print LSI output */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\nLSI_IDLE_CHECK:\n"); rules_print_size = qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf, dump_buf_end, num_rules, false, results_buf ? 
results_buf + results_offset : NULL, num_errors, num_warnings); results_offset += rules_print_size; if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; } /* Print errors/warnings count */ if (*num_errors) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\nIdle Check failed!!! (with %d errors and %d warnings)\n", *num_errors, *num_warnings); else if (*num_warnings) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\nIdle Check completed successfully (with %d warnings)\n", *num_warnings); else results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\nIdle Check completed successfully\n"); if (num_rules_not_dumped) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n", num_rules_not_dumped); /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; return DBG_STATUS_OK; } /* Allocates and fills MCP Trace meta data based on the specified meta data * dump buffer. * Returns debug status code. */ static enum dbg_status qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn, const u32 *meta_buf) { struct dbg_tools_user_data *dev_user_data; u32 offset = 0, signature, i; struct mcp_trace_meta *meta; u8 *meta_buf_bytes; dev_user_data = qed_dbg_get_user_data(p_hwfn); meta = &dev_user_data->mcp_trace_meta; meta_buf_bytes = (u8 *)meta_buf; /* Free the previous meta before loading a new one. */ if (meta->is_allocated) qed_mcp_trace_free_meta_data(p_hwfn); memset(meta, 0, sizeof(*meta)); /* Read first signature */ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Read no. of modules and allocate memory for their pointers */ meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset); meta->modules = kcalloc(meta->modules_num, sizeof(char *), GFP_KERNEL); if (!meta->modules) return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; /* Allocate and read all module strings */ for (i = 0; i < meta->modules_num; i++) { u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset); *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL); if (!(*(meta->modules + i))) { /* Update number of modules to be released */ meta->modules_num = i ? i - 1 : 0; return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; } qed_read_str_from_buf(meta_buf_bytes, &offset, module_len, *(meta->modules + i)); if (module_len > MCP_TRACE_MAX_MODULE_LEN) (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0'; } /* Read second signature */ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); if (signature != NVM_MAGIC_VALUE) return DBG_STATUS_INVALID_TRACE_SIGNATURE; /* Read number of formats and allocate memory for all formats */ meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset); meta->formats = kcalloc(meta->formats_num, sizeof(struct mcp_trace_format), GFP_KERNEL); if (!meta->formats) return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; /* Allocate and read all strings */ for (i = 0; i < meta->formats_num; i++) { struct mcp_trace_format *format_ptr = &meta->formats[i]; u8 format_len; format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes, &offset); format_len = GET_MFW_FIELD(format_ptr->data, MCP_TRACE_FORMAT_LEN); format_ptr->format_str = kzalloc(format_len, GFP_KERNEL); if (!format_ptr->format_str) { /* Update number of modules to be released */ meta->formats_num = i ? 
i - 1 : 0; return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; } qed_read_str_from_buf(meta_buf_bytes, &offset, format_len, format_ptr->format_str); } meta->is_allocated = true; return DBG_STATUS_OK; } /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results * are printed to it. The parsing status is returned. * Arguments: * trace_buf - MCP trace cyclic buffer * trace_buf_size - MCP trace cyclic buffer size in bytes * data_offset - offset in bytes of the data to parse in the MCP trace cyclic * buffer. * data_size - size in bytes of data to parse. * parsed_buf - destination buffer for parsed data. * parsed_results_bytes - size of parsed data in bytes. */ static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn, u8 *trace_buf, u32 trace_buf_size, u32 data_offset, u32 data_size, char *parsed_buf, u32 *parsed_results_bytes) { struct dbg_tools_user_data *dev_user_data; struct mcp_trace_meta *meta; u32 param_mask, param_shift; enum dbg_status status; dev_user_data = qed_dbg_get_user_data(p_hwfn); meta = &dev_user_data->mcp_trace_meta; *parsed_results_bytes = 0; if (!meta->is_allocated) return DBG_STATUS_MCP_TRACE_BAD_DATA; status = DBG_STATUS_OK; while (data_size) { struct mcp_trace_format *format_ptr; u8 format_level, format_module; u32 params[3] = { 0, 0, 0 }; u32 header, format_idx, i; if (data_size < MFW_TRACE_ENTRY_SIZE) return DBG_STATUS_MCP_TRACE_BAD_DATA; header = qed_read_from_cyclic_buf(trace_buf, &data_offset, trace_buf_size, MFW_TRACE_ENTRY_SIZE); data_size -= MFW_TRACE_ENTRY_SIZE; format_idx = header & MFW_TRACE_EVENTID_MASK; /* Skip message if its index doesn't exist in the meta data */ if (format_idx >= meta->formats_num) { u8 format_size = (u8)GET_MFW_FIELD(header, MFW_TRACE_PRM_SIZE); if (data_size < format_size) return DBG_STATUS_MCP_TRACE_BAD_DATA; data_offset = qed_cyclic_add(data_offset, format_size, trace_buf_size); data_size -= format_size; continue; } format_ptr = &meta->formats[format_idx]; for (i = 0, param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift = MCP_TRACE_FORMAT_P1_SIZE_OFFSET; i < MCP_TRACE_FORMAT_MAX_PARAMS; i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH, param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) { /* Extract param size (0..3) */ u8 param_size = (u8)((format_ptr->data & param_mask) >> param_shift); /* If the param size is zero, there are no other * parameters. */ if (!param_size) break; /* Size is encoded using 2 bits, where 3 is used to * encode 4. */ if (param_size == 3) param_size = 4; if (data_size < param_size) return DBG_STATUS_MCP_TRACE_BAD_DATA; params[i] = qed_read_from_cyclic_buf(trace_buf, &data_offset, trace_buf_size, param_size); data_size -= param_size; } format_level = (u8)GET_MFW_FIELD(format_ptr->data, MCP_TRACE_FORMAT_LEVEL); format_module = (u8)GET_MFW_FIELD(format_ptr->data, MCP_TRACE_FORMAT_MODULE); if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) return DBG_STATUS_MCP_TRACE_BAD_DATA; /* Print current message to results buffer */ *parsed_results_bytes += sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes), "%s %-8s: ", s_mcp_trace_level_str[format_level], meta->modules[format_module]); *parsed_results_bytes += sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes), format_ptr->format_str, params[0], params[1], params[2]); } /* Add string NULL terminator */ (*parsed_results_bytes)++; return status; } /* Parses an MCP Trace dump buffer. * If result_buf is not NULL, the MCP Trace results are printed to it. 
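* The dump layout handled below is: a global_params section, an mcp_trace_data section (a struct mcp_trace header followed by the cyclic trace buffer), and an mcp_trace_meta section; when the meta section is empty, a previously registered user meta buffer is used instead.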
* In any case, the required results buffer size is assigned to * parsed_results_bytes. * The parsing status is returned. */ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes, bool free_meta_data) { const char *section_name, *param_name, *param_str_val; u32 data_size, trace_data_dwords, trace_meta_dwords; u32 offset, results_offset, results_buf_bytes; u32 param_num_val, num_section_params; struct mcp_trace *trace; enum dbg_status status; const u32 *meta_buf; u8 *trace_buf; *parsed_results_bytes = 0; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "global_params")) return DBG_STATUS_MCP_TRACE_BAD_DATA; /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, results_buf, &results_offset); /* Read trace_data section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1) return DBG_STATUS_MCP_TRACE_BAD_DATA; dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &param_num_val); if (strcmp(param_name, "size")) return DBG_STATUS_MCP_TRACE_BAD_DATA; trace_data_dwords = param_num_val; /* Prepare trace info */ trace = (struct mcp_trace *)dump_buf; if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size) return DBG_STATUS_MCP_TRACE_BAD_DATA; trace_buf = (u8 *)dump_buf + sizeof(*trace); offset = trace->trace_oldest; data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size); dump_buf += trace_data_dwords; /* Read meta_data section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "mcp_trace_meta")) return DBG_STATUS_MCP_TRACE_BAD_DATA; dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &param_num_val); if (strcmp(param_name, "size")) return DBG_STATUS_MCP_TRACE_BAD_DATA; trace_meta_dwords = param_num_val; /* Choose meta data buffer */ if (!trace_meta_dwords) { /* Dump doesn't include meta data */ struct dbg_tools_user_data *dev_user_data = qed_dbg_get_user_data(p_hwfn); if (!dev_user_data->mcp_trace_user_meta_buf) return DBG_STATUS_MCP_TRACE_NO_META; meta_buf = dev_user_data->mcp_trace_user_meta_buf; } else { /* Dump includes meta data */ meta_buf = dump_buf; } /* Allocate meta data memory */ status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf); if (status != DBG_STATUS_OK) return status; status = qed_parse_mcp_trace_buf(p_hwfn, trace_buf, trace->size, offset, data_size, results_buf ? results_buf + results_offset : NULL, &results_buf_bytes); if (status != DBG_STATUS_OK) return status; if (free_meta_data) qed_mcp_trace_free_meta_data(p_hwfn); *parsed_results_bytes = results_offset + results_buf_bytes; return DBG_STATUS_OK; } /* Parses a Reg FIFO dump buffer. * If result_buf is not NULL, the Reg FIFO results are printed to it. * In any case, the required results buffer size is assigned to * parsed_results_bytes. * The parsing status is returned. 
*/ static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { const char *section_name, *param_name, *param_str_val; u32 param_num_val, num_section_params, num_elements; struct reg_fifo_element *elements; u8 i, j, err_code, vf_val; u32 results_offset = 0; char vf_str[4]; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "global_params")) return DBG_STATUS_REG_FIFO_BAD_DATA; /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, results_buf, &results_offset); /* Read reg_fifo_data section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "reg_fifo_data")) return DBG_STATUS_REG_FIFO_BAD_DATA; dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &param_num_val); if (strcmp(param_name, "size")) return DBG_STATUS_REG_FIFO_BAD_DATA; if (param_num_val % REG_FIFO_ELEMENT_DWORDS) return DBG_STATUS_REG_FIFO_BAD_DATA; num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS; elements = (struct reg_fifo_element *)dump_buf; /* Decode elements */ for (i = 0; i < num_elements; i++) { const char *err_msg = NULL; /* Discover if element belongs to a VF or a PF */ vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF); if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL) sprintf(vf_str, "%s", "N/A"); else sprintf(vf_str, "%d", vf_val); /* Find error message */ err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR); for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++) if (err_code == s_reg_fifo_errors[j].err_code) err_msg = s_reg_fifo_errors[j].err_msg; /* Add parsed element to parsed buffer */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n", elements[i].data, (u32)GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ADDRESS) * REG_FIFO_ELEMENT_ADDR_FACTOR, s_access_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ACCESS)], (u32)GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_PF), vf_str, (u32)GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_PORT), s_privilege_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_PRIVILEGE)], s_protection_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_PROTECTION)], s_master_strs[GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_MASTER)], err_msg ? err_msg : "unknown error code"); } results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "fifo contained %d elements", num_elements); /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; return DBG_STATUS_OK; } static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element *element, char *results_buf, u32 *results_offset) { const struct igu_fifo_addr_data *found_addr = NULL; u8 source, err_type, i, is_cleanup; char parsed_addr_data[32]; char parsed_wr_data[256]; u32 wr_data, prod_cons; bool is_wr_cmd, is_pf; u16 cmd_addr; u64 dword12; /* Dword12 (dword index 1 and 2) contains bits 32..95 of the * FIFO element. 
*/ dword12 = ((u64)element->dword2 << 32) | element->dword1; is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD); is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF); cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR); source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE); err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE); if (source >= ARRAY_SIZE(s_igu_fifo_source_strs)) return DBG_STATUS_IGU_FIFO_BAD_DATA; if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs)) return DBG_STATUS_IGU_FIFO_BAD_DATA; /* Find address data */ for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) { const struct igu_fifo_addr_data *curr_addr = &s_igu_fifo_addr_data[i]; if (cmd_addr >= curr_addr->start_addr && cmd_addr <= curr_addr->end_addr) found_addr = curr_addr; } if (!found_addr) return DBG_STATUS_IGU_FIFO_BAD_DATA; /* Prepare parsed address data */ switch (found_addr->type) { case IGU_ADDR_TYPE_MSIX_MEM: sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2); break; case IGU_ADDR_TYPE_WRITE_INT_ACK: case IGU_ADDR_TYPE_WRITE_PROD_UPDATE: sprintf(parsed_addr_data, " SB = 0x%x", cmd_addr - found_addr->start_addr); break; default: parsed_addr_data[0] = '\0'; } if (!is_wr_cmd) { parsed_wr_data[0] = '\0'; goto out; } /* Prepare parsed write data */ wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA); prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS); is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE); if (source == IGU_SRC_ATTN) { sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons); } else { if (is_cleanup) { u8 cleanup_val, cleanup_type; cleanup_val = GET_FIELD(wr_data, IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL); cleanup_type = GET_FIELD(wr_data, IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE); sprintf(parsed_wr_data, "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ", cleanup_val ? "set" : "clear", cleanup_type); } else { u8 update_flag, en_dis_int_for_sb, segment; u8 timer_mask; update_flag = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_UPDATE_FLAG); en_dis_int_for_sb = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB); segment = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_SEGMENT); timer_mask = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_TIMER_MASK); sprintf(parsed_wr_data, "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ", prod_cons, update_flag ? "update" : "nop", en_dis_int_for_sb ? (en_dis_int_for_sb == 1 ? "disable" : "nop") : "enable", segment ? "attn" : "regular", timer_mask); } } out: /* Add parsed element to parsed buffer */ *results_offset += sprintf(qed_get_buf_ptr(results_buf, *results_offset), "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n", element->dword2, element->dword1, element->dword0, is_pf ? "pf" : "vf", GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_FID), s_igu_fifo_source_strs[source], is_wr_cmd ? "wr" : "rd", cmd_addr, (!is_pf && found_addr->vf_desc) ? found_addr->vf_desc : found_addr->desc, parsed_addr_data, parsed_wr_data, s_igu_fifo_error_strs[err_type]); return DBG_STATUS_OK; } /* Parses an IGU FIFO dump buffer. * If result_buf is not NULL, the IGU FIFO results are printed to it. * In any case, the required results buffer size is assigned to * parsed_results_bytes. * The parsing status is returned. 
*/ static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { const char *section_name, *param_name, *param_str_val; u32 param_num_val, num_section_params, num_elements; struct igu_fifo_element *elements; enum dbg_status status; u32 results_offset = 0; u8 i; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "global_params")) return DBG_STATUS_IGU_FIFO_BAD_DATA; /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, results_buf, &results_offset); /* Read igu_fifo_data section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "igu_fifo_data")) return DBG_STATUS_IGU_FIFO_BAD_DATA; dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &param_num_val); if (strcmp(param_name, "size")) return DBG_STATUS_IGU_FIFO_BAD_DATA; if (param_num_val % IGU_FIFO_ELEMENT_DWORDS) return DBG_STATUS_IGU_FIFO_BAD_DATA; num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS; elements = (struct igu_fifo_element *)dump_buf; /* Decode elements */ for (i = 0; i < num_elements; i++) { status = qed_parse_igu_fifo_element(&elements[i], results_buf, &results_offset); if (status != DBG_STATUS_OK) return status; } results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "fifo contained %d elements", num_elements); /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; return DBG_STATUS_OK; } static enum dbg_status qed_parse_protection_override_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { const char *section_name, *param_name, *param_str_val; u32 param_num_val, num_section_params, num_elements; struct protection_override_element *elements; u32 results_offset = 0; u8 i; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "global_params")) return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, results_buf, &results_offset); /* Read protection_override_data section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "protection_override_data")) return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &param_num_val); if (strcmp(param_name, "size")) return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS) return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS; elements = (struct protection_override_element *)dump_buf; /* Decode elements */ for (i = 0; i < num_elements; i++) { u32 address = GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_ADDRESS) * PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR; results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n", i, address, (u32)GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE), (u32)GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_READ), (u32)GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_WRITE), s_protection_strs[GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)], 
s_protection_strs[GET_FIELD(elements[i].data, PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]); } results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "protection override contained %d elements", num_elements); /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; return DBG_STATUS_OK; } /* Parses a FW Asserts dump buffer. * If result_buf is not NULL, the FW Asserts results are printed to it. * In any case, the required results buffer size is assigned to * parsed_results_bytes. * The parsing status is returned. */ static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { u32 num_section_params, param_num_val, i, results_offset = 0; const char *param_name, *param_str_val, *section_name; bool last_section_found = false; *parsed_results_bytes = 0; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (strcmp(section_name, "global_params")) return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, results_buf, &results_offset); while (!last_section_found) { dump_buf += qed_read_section_hdr(dump_buf, &section_name, &num_section_params); if (!strcmp(section_name, "fw_asserts")) { /* Extract params */ const char *storm_letter = NULL; u32 storm_dump_size = 0; for (i = 0; i < num_section_params; i++) { dump_buf += qed_read_param(dump_buf, &param_name, &param_str_val, &param_num_val); if (!strcmp(param_name, "storm")) storm_letter = param_str_val; else if (!strcmp(param_name, "size")) storm_dump_size = param_num_val; else return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; } if (!storm_letter || !storm_dump_size) return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; /* Print data */ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n%sSTORM_ASSERT: size=%d\n", storm_letter, storm_dump_size); for (i = 0; i < storm_dump_size; i++, dump_buf++) results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), "%08x\n", *dump_buf); } else if (!strcmp(section_name, "last")) { last_section_found = true; } else { return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; } } /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; return DBG_STATUS_OK; } /***************************** Public Functions *******************************/ enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, const u8 * const bin_ptr) { struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr; u8 buf_id; /* Convert binary data to debug arrays */ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) qed_set_dbg_bin_buf(p_hwfn, (enum bin_dbg_buffer_type)buf_id, (u32 *)(bin_ptr + buf_hdrs[buf_id].offset), buf_hdrs[buf_id].length); return DBG_STATUS_OK; } enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, void **user_data_ptr) { *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data), GFP_KERNEL); if (!(*user_data_ptr)) return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; return DBG_STATUS_OK; } const char *qed_dbg_get_status_str(enum dbg_status status) { return (status < MAX_DBG_STATUS) ? 
s_status_str[status] : "Invalid debug status"; } enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size) { u32 num_errors, num_warnings; return qed_parse_idle_chk_dump(p_hwfn, dump_buf, num_dumped_dwords, NULL, results_buf_size, &num_errors, &num_warnings); } enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf, u32 *num_errors, u32 *num_warnings) { u32 parsed_buf_size; return qed_parse_idle_chk_dump(p_hwfn, dump_buf, num_dumped_dwords, results_buf, &parsed_buf_size, num_errors, num_warnings); } void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, const u32 *meta_buf) { struct dbg_tools_user_data *dev_user_data = qed_dbg_get_user_data(p_hwfn); dev_user_data->mcp_trace_user_meta_buf = meta_buf; } enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size) { return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, NULL, results_buf_size, true); } enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf) { u32 parsed_buf_size; /* Doesn't do anything, needed for compile time asserts */ qed_user_static_asserts(); return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, &parsed_buf_size, true); } enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, u32 *dump_buf, char *results_buf) { u32 parsed_buf_size; return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, &parsed_buf_size, false); } enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, u8 *dump_buf, u32 num_dumped_bytes, char *results_buf) { u32 parsed_results_bytes; return qed_parse_mcp_trace_buf(p_hwfn, dump_buf, num_dumped_bytes, 0, num_dumped_bytes, results_buf, &parsed_results_bytes); } /* Frees the specified MCP Trace meta data */ void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn) { struct dbg_tools_user_data *dev_user_data; struct mcp_trace_meta *meta; u32 i; dev_user_data = qed_dbg_get_user_data(p_hwfn); meta = &dev_user_data->mcp_trace_meta; if (!meta->is_allocated) return; /* Release modules */ if (meta->modules) { for (i = 0; i < meta->modules_num; i++) kfree(meta->modules[i]); kfree(meta->modules); } /* Release formats */ if (meta->formats) { for (i = 0; i < meta->formats_num; i++) kfree(meta->formats[i].format_str); kfree(meta->formats); } meta->is_allocated = false; } enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size) { return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf) { u32 parsed_buf_size; return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size); } enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size) { return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf) { u32 parsed_buf_size; return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size); } enum dbg_status qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size) { return 
qed_parse_protection_override_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf) { u32 parsed_buf_size; return qed_parse_protection_override_dump(dump_buf, results_buf, &parsed_buf_size); } enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size) { return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf) { u32 parsed_buf_size; return qed_parse_fw_asserts_dump(dump_buf, results_buf, &parsed_buf_size); } enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results) { const u32 *block_attn_name_offsets; const char *attn_name_base; const char *block_name; enum dbg_attn_type attn_type; u8 num_regs, i, j; num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS); attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE); block_name = qed_dbg_get_block_name(p_hwfn, results->block_id); if (!block_name) return DBG_STATUS_INVALID_ARGS; if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr || !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) return DBG_STATUS_DBG_ARRAY_NOT_SET; block_attn_name_offsets = (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr + results->names_offset; attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr; /* Go over registers with a non-zero attention status */ for (i = 0; i < num_regs; i++) { struct dbg_attn_bit_mapping *bit_mapping; struct dbg_attn_reg_result *reg_result; u8 num_reg_attn, bit_idx = 0; reg_result = &results->reg_results[i]; num_reg_attn = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN); bit_mapping = (struct dbg_attn_bit_mapping *) p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr + reg_result->block_attn_offset; /* Go over attention status bits */ for (j = 0; j < num_reg_attn; j++) { u16 attn_idx_val = GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_VAL); const char *attn_name, *attn_type_str, *masked_str; u32 attn_name_offset; u32 sts_addr; /* Check if bit mask should be advanced (due to unused * bits). */ if (GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) { bit_idx += (u8)attn_idx_val; continue; } /* Check current bit index */ if (reg_result->sts_val & BIT(bit_idx)) { /* An attention bit with value=1 was found * Find attention name */ attn_name_offset = block_attn_name_offsets[attn_idx_val]; attn_name = attn_name_base + attn_name_offset; attn_type_str = (attn_type == ATTN_TYPE_INTERRUPT ? "Interrupt" : "Parity"); masked_str = reg_result->mask_val & BIT(bit_idx) ? 
" [masked]" : ""; sts_addr = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_STS_ADDRESS); DP_NOTICE(p_hwfn, "%s (%s) : %s [address 0x%08x, bit %d]%s\n", block_name, attn_type_str, attn_name, sts_addr * 4, bit_idx, masked_str); } bit_idx++; } } return DBG_STATUS_OK; } /* Wrapper for unifying the idle_chk and mcp_trace api */ static enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf) { u32 num_errors, num_warnnings; return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords, results_buf, &num_errors, &num_warnnings); } static DEFINE_MUTEX(qed_dbg_lock); #define MAX_PHY_RESULT_BUFFER 9000 /******************************** Feature Meta data section ******************/ #define GRC_NUM_STR_FUNCS 2 #define IDLE_CHK_NUM_STR_FUNCS 1 #define MCP_TRACE_NUM_STR_FUNCS 1 #define REG_FIFO_NUM_STR_FUNCS 1 #define IGU_FIFO_NUM_STR_FUNCS 1 #define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1 #define FW_ASSERTS_NUM_STR_FUNCS 1 #define ILT_NUM_STR_FUNCS 1 #define PHY_NUM_STR_FUNCS 20 /* Feature meta data lookup table */ static struct { char *name; u32 num_funcs; enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *size); enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, u32 buf_size, u32 *dumped_dwords); enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf); enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); const struct qed_func_lookup *hsi_func_lookup; } qed_features_lookup[] = { { "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size, qed_dbg_grc_dump, NULL, NULL, NULL}, { "idle_chk", IDLE_CHK_NUM_STR_FUNCS, qed_dbg_idle_chk_get_dump_buf_size, qed_dbg_idle_chk_dump, qed_print_idle_chk_results_wrapper, qed_get_idle_chk_results_buf_size, NULL}, { "mcp_trace", MCP_TRACE_NUM_STR_FUNCS, qed_dbg_mcp_trace_get_dump_buf_size, qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results, qed_get_mcp_trace_results_buf_size, NULL}, { "reg_fifo", REG_FIFO_NUM_STR_FUNCS, qed_dbg_reg_fifo_get_dump_buf_size, qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results, qed_get_reg_fifo_results_buf_size, NULL}, { "igu_fifo", IGU_FIFO_NUM_STR_FUNCS, qed_dbg_igu_fifo_get_dump_buf_size, qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results, qed_get_igu_fifo_results_buf_size, NULL}, { "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS, qed_dbg_protection_override_get_dump_buf_size, qed_dbg_protection_override_dump, qed_print_protection_override_results, qed_get_protection_override_results_buf_size, NULL}, { "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS, qed_dbg_fw_asserts_get_dump_buf_size, qed_dbg_fw_asserts_dump, qed_print_fw_asserts_results, qed_get_fw_asserts_results_buf_size, NULL}, { "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size, qed_dbg_ilt_dump, NULL, NULL, NULL},}; static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) { u32 i, precision = 80; if (!p_text_buf) return; pr_notice("\n%.*s", precision, p_text_buf); for (i = precision; i < text_size; i += precision) pr_cont("%.*s", precision, p_text_buf + i); pr_cont("\n"); } #define QED_RESULTS_BUF_MIN_SIZE 16 /* Generic function for decoding debug feature info */ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, enum qed_dbg_features feature_idx) { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_features[feature_idx]; u32 txt_size_bytes, null_char_pos, i; u32 *dbuf, 
dwords; enum dbg_status rc; char *text_buf; /* Check if feature supports formatting capability */ if (!qed_features_lookup[feature_idx].results_buf_size) return DBG_STATUS_OK; dbuf = (u32 *)feature->dump_buf; dwords = feature->dumped_dwords; /* Obtain size of formatted output */ rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn, dbuf, dwords, &txt_size_bytes); if (rc != DBG_STATUS_OK) return rc; /* Make sure that the allocated size is a multiple of dword * (4 bytes). */ null_char_pos = txt_size_bytes - 1; txt_size_bytes = (txt_size_bytes + 3) & ~0x3; if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) { DP_NOTICE(p_hwfn->cdev, "formatted size of feature was too small %d. Aborting\n", txt_size_bytes); return DBG_STATUS_INVALID_ARGS; } /* allocate temp text buf */ text_buf = vzalloc(txt_size_bytes); if (!text_buf) { DP_NOTICE(p_hwfn->cdev, "failed to allocate text buffer. Aborting\n"); return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; } /* Decode feature opcodes to string on temp buf */ rc = qed_features_lookup[feature_idx].print_results(p_hwfn, dbuf, dwords, text_buf); if (rc != DBG_STATUS_OK) { vfree(text_buf); return rc; } /* Replace the original null character with a '\n' character. * The bytes that were added as a result of the dword alignment are also * padded with '\n' characters. */ for (i = null_char_pos; i < txt_size_bytes; i++) text_buf[i] = '\n'; /* Dump printable feature to log */ if (p_hwfn->cdev->print_dbg_data) qed_dbg_print_feature(text_buf, txt_size_bytes); /* Dump binary data as is to the output file */ if (p_hwfn->cdev->dbg_bin_dump) { vfree(text_buf); return rc; } /* Free the old dump_buf and point the dump_buf to the newly allocated * and formatted text buffer. */ vfree(feature->dump_buf); feature->dump_buf = text_buf; feature->buf_size = txt_size_bytes; feature->dumped_dwords = txt_size_bytes / 4; return rc; } #define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF /* Generic function for performing the dump of a debug feature. */ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_dbg_features feature_idx) { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_features[feature_idx]; u32 buf_size_dwords, *dbuf, *dwords; enum dbg_status rc; DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n", qed_features_lookup[feature_idx].name); /* Dump_buf was already allocated need to free (this can happen if dump * was called but file was never read). * We can't use the buffer as is since size may have changed. */ if (feature->dump_buf) { vfree(feature->dump_buf); feature->dump_buf = NULL; } /* Get buffer size from hsi, allocate accordingly, and perform the * dump. */ rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt, &buf_size_dwords); if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED) return rc; if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) { feature->buf_size = 0; DP_NOTICE(p_hwfn->cdev, "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n", qed_features_lookup[feature_idx].name, buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS); return DBG_STATUS_OK; } feature->buf_size = buf_size_dwords * sizeof(u32); feature->dump_buf = vmalloc(feature->buf_size); if (!feature->dump_buf) return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; dbuf = (u32 *)feature->dump_buf; dwords = &feature->dumped_dwords; rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt, dbuf, feature->buf_size / sizeof(u32), dwords); /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error. 
* In this case the buffer holds valid binary data, but we won't able * to parse it (since parsing relies on data in NVRAM which is only * accessible when MFW is responsive). skip the formatting but return * success so that binary data is provided. */ if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED) return DBG_STATUS_OK; if (rc != DBG_STATUS_OK) return rc; /* Format output */ rc = format_feature(p_hwfn, feature_idx); return rc; } int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes); } int qed_dbg_grc_size(struct qed_dev *cdev) { return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC); } int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK, num_dumped_bytes); } int qed_dbg_idle_chk_size(struct qed_dev *cdev) { return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK); } int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO, num_dumped_bytes); } int qed_dbg_reg_fifo_size(struct qed_dev *cdev) { return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO); } int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO, num_dumped_bytes); } int qed_dbg_igu_fifo_size(struct qed_dev *cdev) { return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO); } static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, u32 *length) { struct qed_nvm_image_att image_att; int rc; *length = 0; rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); if (rc) return rc; *length = image_att.length; return rc; } static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes, enum qed_nvm_images image_id) { struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; u32 len_rounded; int rc; *num_dumped_bytes = 0; rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded); if (rc) return rc; DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"nvram image %d\"]\n", image_id); len_rounded = roundup(len_rounded, sizeof(u32)); rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded); if (rc) return rc; /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */ if (image_id != QED_NVM_IMAGE_NVM_META) cpu_to_be32_array((__force __be32 *)buffer, (const u32 *)buffer, len_rounded / sizeof(u32)); *num_dumped_bytes = len_rounded; return rc; } int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE, num_dumped_bytes); } int qed_dbg_protection_override_size(struct qed_dev *cdev) { return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE); } int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS, num_dumped_bytes); } int qed_dbg_fw_asserts_size(struct qed_dev *cdev) { return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS); } int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes); } int qed_dbg_ilt_size(struct qed_dev *cdev) { return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT); } int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE, 
num_dumped_bytes); } int qed_dbg_mcp_trace_size(struct qed_dev *cdev) { return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE); } /* Defines the amount of bytes allocated for recording the length of debugfs * feature buffer. */ #define REGDUMP_HEADER_SIZE sizeof(u32) #define REGDUMP_HEADER_SIZE_SHIFT 0 #define REGDUMP_HEADER_SIZE_MASK 0xffffff #define REGDUMP_HEADER_FEATURE_SHIFT 24 #define REGDUMP_HEADER_FEATURE_MASK 0x1f #define REGDUMP_HEADER_BIN_DUMP_SHIFT 29 #define REGDUMP_HEADER_BIN_DUMP_MASK 0x1 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30 #define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1 #define REGDUMP_HEADER_ENGINE_SHIFT 31 #define REGDUMP_HEADER_ENGINE_MASK 0x1 #define REGDUMP_MAX_SIZE 0x1000000 #define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15) enum debug_print_features { OLD_MODE = 0, IDLE_CHK = 1, GRC_DUMP = 2, MCP_TRACE = 3, REG_FIFO = 4, PROTECTION_OVERRIDE = 5, IGU_FIFO = 6, PHY = 7, FW_ASSERTS = 8, NVM_CFG1 = 9, DEFAULT_CFG = 10, NVM_META = 11, MDUMP = 12, ILT_DUMP = 13, }; static u32 qed_calc_regdump_header(struct qed_dev *cdev, enum debug_print_features feature, int engine, u32 feature_size, u8 omit_engine, u8 dbg_bin_dump) { u32 res = 0; SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size); if (res != feature_size) DP_NOTICE(cdev, "Feature %d is too large (size 0x%x) and will corrupt the dump\n", feature, feature_size); SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump); SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); return res; } int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) { u8 cur_engine, omit_engine = 0, org_engine; struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; int grc_params[MAX_DBG_GRC_PARAMS], rc, i; u32 offset = 0, feature_size; for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) grc_params[i] = dev_data->grc.param_val[i]; if (!QED_IS_CMT(cdev)) omit_engine = 1; cdev->dbg_bin_dump = 1; mutex_lock(&qed_dbg_lock); org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { /* Collect idle_chks and grcDump for each hw function */ DP_VERBOSE(cdev, QED_MSG_DEBUG, "obtaining idle_chk and grcdump for current engine\n"); qed_set_debug_engine(cdev, cur_engine); /* First idle_chk */ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); } /* Second idle_chk */ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); } /* reg_fifo dump */ rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, REG_FIFO, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_reg_fifo failed. 
rc = %d\n", rc); } /* igu_fifo dump */ rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc); } /* protection_override dump */ rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_protection_override failed. rc = %d\n", rc); } /* fw_asserts dump */ rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, FW_ASSERTS, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n", rc); } feature_size = qed_dbg_ilt_size(cdev); if (!cdev->disable_ilt_dump && feature_size < ILT_DUMP_MAX_SIZE) { rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, ILT_DUMP, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n", rc); } } /* Grc dump - must be last because when mcp stuck it will * clutter idle_chk, reg_fifo, ... */ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) dev_data->grc.param_val[i] = grc_params[i]; rc = qed_dbg_grc(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, GRC_DUMP, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc); } } qed_set_debug_engine(cdev, org_engine); /* mcp_trace */ rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_mcp_trace failed. 
rc = %d\n", rc); } /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size, QED_NVM_IMAGE_NVM_CFG1); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc); } /* nvm default */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size, QED_NVM_IMAGE_DEFAULT_CFG); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG", rc); } /* nvm meta */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size, QED_NVM_IMAGE_NVM_META); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, NVM_META, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc); } /* nvm mdump */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size, QED_NVM_IMAGE_MDUMP); if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, MDUMP, cur_engine, feature_size, omit_engine, cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); } mutex_unlock(&qed_dbg_lock); cdev->dbg_bin_dump = 0; return 0; } int qed_dbg_all_data_size(struct qed_dev *cdev) { u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0; struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; u8 cur_engine, org_engine; cdev->disable_ilt_dump = false; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { /* Engine specific */ DP_VERBOSE(cdev, QED_MSG_DEBUG, "calculating idle_chk and grcdump register length for current engine\n"); qed_set_debug_engine(cdev, cur_engine); regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) + REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) + REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) + REGDUMP_HEADER_SIZE + qed_dbg_protection_override_size(cdev) + REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev); if (ilt_len < ILT_DUMP_MAX_SIZE) { total_ilt_len += ilt_len; regs_len += ilt_len; } } qed_set_debug_engine(cdev, org_engine); /* Engine common */ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) + REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev); qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len); if (image_len) regs_len += REGDUMP_HEADER_SIZE + image_len; qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len); if (image_len) regs_len += REGDUMP_HEADER_SIZE + image_len; qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len); if (image_len) 
regs_len += REGDUMP_HEADER_SIZE + image_len; qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len); if (image_len) regs_len += REGDUMP_HEADER_SIZE + image_len; if (regs_len > REGDUMP_MAX_SIZE) { DP_VERBOSE(cdev, QED_MSG_DEBUG, "Dump exceeds max size 0x%x, disable ILT dump\n", REGDUMP_MAX_SIZE); cdev->disable_ilt_dump = true; regs_len -= total_ilt_len; } return regs_len; } int qed_dbg_feature(struct qed_dev *cdev, void *buffer, enum qed_dbg_features feature, u32 *num_dumped_bytes) { struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; enum dbg_status dbg_rc; struct qed_ptt *p_ptt; int rc = 0; /* Acquire ptt */ p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EINVAL; /* Get dump */ dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature); if (dbg_rc != DBG_STATUS_OK) { DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n", qed_dbg_get_status_str(dbg_rc)); *num_dumped_bytes = 0; rc = -EINVAL; goto out; } DP_VERBOSE(cdev, QED_MSG_DEBUG, "copying debugfs feature to external buffer\n"); memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size); *num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords * 4; out: qed_ptt_release(p_hwfn, p_ptt); return rc; } int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) { struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u32 buf_size_dwords; enum dbg_status rc; if (!p_ptt) return -EINVAL; rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt, &buf_size_dwords); if (rc != DBG_STATUS_OK) buf_size_dwords = 0; /* Feature will not be dumped if it exceeds maximum size */ if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) buf_size_dwords = 0; qed_ptt_release(p_hwfn, p_ptt); qed_feature->buf_size = buf_size_dwords * sizeof(u32); return qed_feature->buf_size; } int qed_dbg_phy_size(struct qed_dev *cdev) { /* return max size of phy info and * phy mac_stat multiplied by the number of ports */ return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev)); } u8 qed_get_debug_engine(struct qed_dev *cdev) { return cdev->engine_for_debug; } void qed_set_debug_engine(struct qed_dev *cdev, int engine_number) { DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n", engine_number); cdev->engine_for_debug = engine_number; } void qed_dbg_pf_init(struct qed_dev *cdev) { const u8 *dbg_values = NULL; int i; /* Sync ver with debugbus qed code */ qed_dbg_set_app_ver(TOOLS_VERSION); /* Debug values are after init values. * The offset is the first dword of the file. */ dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data; for_each_hwfn(cdev, i) { qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values); qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values); } /* Set the hwfn to be 0 as default */ cdev->engine_for_debug = 0; } void qed_dbg_pf_exit(struct qed_dev *cdev) { struct qed_dbg_feature *feature = NULL; enum qed_dbg_features feature_idx; /* debug features' buffers may be allocated if debug feature was used * but dump wasn't called */ for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) { feature = &cdev->dbg_features[feature_idx]; if (feature->dump_buf) { vfree(feature->dump_buf); feature->dump_buf = NULL; } } }
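/*
 * Editor's illustrative sketch -- not part of the qed driver sources above.
 * qed_dbg_all_data() writes a stream of [u32 header][payload] records into
 * the caller's buffer; each header packs the payload size in bytes, the
 * debug_print_features id, the bin-dump and omit-engine flags, and the
 * engine index, per the REGDUMP_HEADER_* shifts and masks defined earlier
 * in this file. A minimal stand-alone decoder of one such header, assuming
 * that bit layout, could look like this:
 */
#include <stdint.h>
#include <stdio.h>

static void decode_regdump_header(uint32_t hdr)
{
	unsigned int size_bytes  = hdr & 0xffffffu;      /* bits 0..23: payload size in bytes */
	unsigned int feature     = (hdr >> 24) & 0x1fu;  /* bits 24..28: debug_print_features id */
	unsigned int bin_dump    = (hdr >> 29) & 0x1u;   /* bit 29: binary (unformatted) dump flag */
	unsigned int omit_engine = (hdr >> 30) & 0x1u;   /* bit 30: engine field not meaningful */
	unsigned int engine      = (hdr >> 31) & 0x1u;   /* bit 31: engine (hwfn) index */

	printf("feature %u: %u payload bytes, engine %u%s, bin_dump %u\n",
	       feature, size_bytes, engine,
	       omit_engine ? " (omitted)" : "", bin_dump);
}

int main(void)
{
	/* e.g. a header for a 0x100-byte GRC_DUMP (feature id 2) on engine 0 */
	decode_regdump_header(0x100u | (2u << 24));
	return 0;
}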
linux-master
drivers/net/ethernet/qlogic/qed/qed_debug.c
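/*
 * Editor's illustrative sketch -- not part of the qed driver sources.
 * It restates the MCP trace parameter-size encoding handled by
 * qed_parse_mcp_trace_buf() in the preceding file (qed_debug.c): each of up
 * to three parameters has a 2-bit size field in the format descriptor,
 * where 0 means "no parameter" and the value 3 encodes a 4-byte parameter.
 */
#include <stdio.h>

/* Map one 2-bit encoded size onto a byte count (0, 1, 2 or 4). */
static unsigned int mcp_param_bytes(unsigned int encoded)
{
	return encoded == 3 ? 4 : encoded;
}

int main(void)
{
	unsigned int enc;

	for (enc = 0; enc <= 3; enc++)
		printf("encoded %u -> %u byte(s)\n", enc, mcp_param_bytes(enc));
	return 0;
}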
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/stddef.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <asm/byteorder.h> #include <linux/dma-mapping.h> #include <linux/string.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/vmalloc.h> #include <linux/crash_dump.h> #include <linux/crc32.h> #include <linux/qed/qed_if.h> #include <linux/qed/qed_ll2_if.h> #include <net/devlink.h> #include <linux/phylink.h> #include "qed.h" #include "qed_sriov.h" #include "qed_sp.h" #include "qed_dev_api.h" #include "qed_ll2.h" #include "qed_fcoe.h" #include "qed_iscsi.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_hw.h" #include "qed_selftest.h" #include "qed_debug.h" #include "qed_devlink.h" #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) #define QED_RDMA_SRQS QED_ROCE_QPS #define QED_NVM_CFG_GET_FLAGS 0xA #define QED_NVM_CFG_GET_PF_FLAGS 0x1A #define QED_NVM_CFG_MAX_ATTRS 50 static char version[] = "QLogic FastLinQ 4xxxx Core Module qed\n"; MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); MODULE_LICENSE("GPL"); #define FW_FILE_VERSION \ __stringify(FW_MAJOR_VERSION) "." \ __stringify(FW_MINOR_VERSION) "." \ __stringify(FW_REVISION_VERSION) "." \ __stringify(FW_ENGINEERING_VERSION) #define QED_FW_FILE_NAME \ "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" MODULE_FIRMWARE(QED_FW_FILE_NAME); /* MFW speed capabilities maps */ struct qed_mfw_speed_map { u32 mfw_val; __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); const u32 *cap_arr; u32 arr_size; }; #define QED_MFW_SPEED_MAP(type, arr) \ { \ .mfw_val = (type), \ .cap_arr = (arr), \ .arr_size = ARRAY_SIZE(arr), \ } static const u32 qed_mfw_ext_1g[] __initconst = { ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT, }; static const u32 qed_mfw_ext_10g[] __initconst = { ETHTOOL_LINK_MODE_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, }; static const u32 qed_mfw_ext_25g[] __initconst = { ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, }; static const u32 qed_mfw_ext_40g[] __initconst = { ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, }; static const u32 qed_mfw_ext_50g_base_r[] __initconst = { ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, }; static const u32 qed_mfw_ext_50g_base_r2[] __initconst = { ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, }; static const u32 qed_mfw_ext_100g_base_r2[] __initconst = { ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, 
ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, }; static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, }; static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, qed_mfw_ext_50g_base_r), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2, qed_mfw_ext_50g_base_r2), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2, qed_mfw_ext_100g_base_r2), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4, qed_mfw_ext_100g_base_r4), }; static const u32 qed_mfw_legacy_1g[] __initconst = { ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT, }; static const u32 qed_mfw_legacy_10g[] __initconst = { ETHTOOL_LINK_MODE_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, }; static const u32 qed_mfw_legacy_20g[] __initconst = { ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, }; static const u32 qed_mfw_legacy_25g[] __initconst = { ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, }; static const u32 qed_mfw_legacy_40g[] __initconst = { ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, }; static const u32 qed_mfw_legacy_50g[] __initconst = { ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, }; static const u32 qed_mfw_legacy_bb_100g[] __initconst = { ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, }; static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = { QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G, qed_mfw_legacy_1g), QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G, qed_mfw_legacy_10g), QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G, qed_mfw_legacy_20g), QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G, qed_mfw_legacy_25g), QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G, qed_mfw_legacy_40g), QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G, qed_mfw_legacy_50g), QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G, qed_mfw_legacy_bb_100g), }; static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map) { linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); map->cap_arr = NULL; map->arr_size = 0; } static void __init qed_mfw_speed_maps_init(void) { u32 i; for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) qed_mfw_speed_map_populate(qed_mfw_ext_maps + i); for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i); } static int __init qed_init(void) { pr_info("%s", version); 
qed_mfw_speed_maps_init(); return 0; } module_init(qed_init); static void __exit qed_exit(void) { /* To prevent marking this module as "permanent" */ } module_exit(qed_exit); static void qed_free_pci(struct qed_dev *cdev) { struct pci_dev *pdev = cdev->pdev; if (cdev->doorbells && cdev->db_size) iounmap(cdev->doorbells); if (cdev->regview) iounmap(cdev->regview); if (atomic_read(&pdev->enable_cnt) == 1) pci_release_regions(pdev); pci_disable_device(pdev); } #define PCI_REVISION_ID_ERROR_VAL 0xff /* Performs PCI initializations as well as initializing PCI-related parameters * in the device structrue. Returns 0 in case of success. */ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) { u8 rev_id; int rc; cdev->pdev = pdev; rc = pci_enable_device(pdev); if (rc) { DP_NOTICE(cdev, "Cannot enable PCI device\n"); goto err0; } if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { DP_NOTICE(cdev, "No memory region found in bar #0\n"); rc = -EIO; goto err1; } if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { DP_NOTICE(cdev, "No memory region found in bar #2\n"); rc = -EIO; goto err1; } if (atomic_read(&pdev->enable_cnt) == 1) { rc = pci_request_regions(pdev, "qed"); if (rc) { DP_NOTICE(cdev, "Failed to request PCI memory resources\n"); goto err1; } pci_set_master(pdev); pci_save_state(pdev); } pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); if (rev_id == PCI_REVISION_ID_ERROR_VAL) { DP_NOTICE(cdev, "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n", rev_id); rc = -ENODEV; goto err2; } if (!pci_is_pcie(pdev)) { DP_NOTICE(cdev, "The bus is not PCI Express\n"); rc = -EIO; goto err2; } cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); if (IS_PF(cdev) && !cdev->pci_params.pm_cap) DP_NOTICE(cdev, "Cannot find power management capability\n"); rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64)); if (rc) { DP_NOTICE(cdev, "Can't request DMA addresses\n"); rc = -EIO; goto err2; } cdev->pci_params.mem_start = pci_resource_start(pdev, 0); cdev->pci_params.mem_end = pci_resource_end(pdev, 0); cdev->pci_params.irq = pdev->irq; cdev->regview = pci_ioremap_bar(pdev, 0); if (!cdev->regview) { DP_NOTICE(cdev, "Cannot map register space, aborting\n"); rc = -ENOMEM; goto err2; } cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); cdev->db_size = pci_resource_len(cdev->pdev, 2); if (!cdev->db_size) { if (IS_PF(cdev)) { DP_NOTICE(cdev, "No Doorbell bar available\n"); return -EINVAL; } else { return 0; } } cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); if (!cdev->doorbells) { DP_NOTICE(cdev, "Cannot map doorbell space\n"); return -ENOMEM; } return 0; err2: pci_release_regions(pdev); err1: pci_disable_device(pdev); err0: return rc; } int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_hw_info *hw_info = &p_hwfn->hw_info; struct qed_tunnel_info *tun = &cdev->tunnel; struct qed_ptt *ptt; memset(dev_info, 0, sizeof(struct qed_dev_info)); if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && tun->vxlan.b_mode_enabled) dev_info->vxlan_enable = true; if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) dev_info->gre_enable = true; if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 
dev_info->geneve_enable = true; dev_info->num_hwfns = cdev->num_hwfns; dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); dev_info->dev_type = cdev->type; ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); if (IS_PF(cdev)) { dev_info->fw_major = FW_MAJOR_VERSION; dev_info->fw_minor = FW_MINOR_VERSION; dev_info->fw_rev = FW_REVISION_VERSION; dev_info->fw_eng = FW_ENGINEERING_VERSION; dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, &cdev->mf_bits); if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits)) dev_info->b_arfs_capable = true; dev_info->tx_switching = true; if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) dev_info->wol_support = true; dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); dev_info->esl = qed_mcp_is_esl_supported(p_hwfn); dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, &dev_info->fw_minor, &dev_info->fw_rev, &dev_info->fw_eng); } if (IS_PF(cdev)) { ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); if (ptt) { qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, &dev_info->mfw_rev, NULL); qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, &dev_info->mbi_version); qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, &dev_info->flash_size); qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); } } else { qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, &dev_info->mfw_rev, NULL); } dev_info->mtu = hw_info->mtu; cdev->common_dev_info = *dev_info; return 0; } static void qed_free_cdev(struct qed_dev *cdev) { kfree((void *)cdev); } static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) { struct qed_dev *cdev; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) return cdev; qed_init_struct(cdev); return cdev; } /* Sets the requested power state */ static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) { if (!cdev) return -ENODEV; DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); return 0; } /* probing */ static struct qed_dev *qed_probe(struct pci_dev *pdev, struct qed_probe_params *params) { struct qed_dev *cdev; int rc; cdev = qed_alloc_cdev(pdev); if (!cdev) goto err0; cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; cdev->protocol = params->protocol; if (params->is_vf) cdev->b_is_vf = true; qed_init_dp(cdev, params->dp_module, params->dp_level); cdev->recov_in_prog = params->recov_in_prog; rc = qed_init_pci(cdev, pdev); if (rc) { DP_ERR(cdev, "init pci failed\n"); goto err1; } DP_INFO(cdev, "PCI init completed successfully\n"); rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); if (rc) { DP_ERR(cdev, "hw prepare failed\n"); goto err2; } DP_INFO(cdev, "%s completed successfully\n", __func__); return cdev; err2: qed_free_pci(cdev); err1: qed_free_cdev(cdev); err0: return NULL; } static void qed_remove(struct qed_dev *cdev) { if (!cdev) return; qed_hw_remove(cdev); qed_free_pci(cdev); qed_set_power_state(cdev, PCI_D3hot); qed_free_cdev(cdev); } static void qed_disable_msix(struct qed_dev *cdev) { if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { pci_disable_msix(cdev->pdev); kfree(cdev->int_params.msix_table); } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { pci_disable_msi(cdev->pdev); } memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); } static int qed_enable_msix(struct qed_dev *cdev, struct qed_int_params *int_params) { int i, rc, cnt; cnt = int_params->in.num_vectors; for (i 
= 0; i < cnt; i++) int_params->msix_table[i].entry = i; rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, int_params->in.min_msix_cnt, cnt); if (rc < cnt && rc >= int_params->in.min_msix_cnt && (rc % cdev->num_hwfns)) { pci_disable_msix(cdev->pdev); /* If fastpath is initialized, we need at least one interrupt * per hwfn [and the slow path interrupts]. New requested number * should be a multiple of the number of hwfns. */ cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; DP_NOTICE(cdev, "Trying to enable MSI-X with less vectors (%d out of %d)\n", cnt, int_params->in.num_vectors); rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, cnt); if (!rc) rc = cnt; } /* For VFs, we should return with an error in case we didn't get the * exact number of msix vectors as we requested. * Not doing that will lead to a crash when starting queues for * this VF. */ if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { /* MSI-x configuration was achieved */ int_params->out.int_mode = QED_INT_MODE_MSIX; int_params->out.num_vectors = rc; rc = 0; } else { DP_NOTICE(cdev, "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", cnt, rc); } return rc; } /* This function outputs the int mode and the number of enabled msix vector */ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) { struct qed_int_params *int_params = &cdev->int_params; struct msix_entry *tbl; int rc = 0, cnt; switch (int_params->in.int_mode) { case QED_INT_MODE_MSIX: /* Allocate MSIX table */ cnt = int_params->in.num_vectors; int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); if (!int_params->msix_table) { rc = -ENOMEM; goto out; } /* Enable MSIX */ rc = qed_enable_msix(cdev, int_params); if (!rc) goto out; DP_NOTICE(cdev, "Failed to enable MSI-X\n"); kfree(int_params->msix_table); if (force_mode) goto out; fallthrough; case QED_INT_MODE_MSI: if (cdev->num_hwfns == 1) { rc = pci_enable_msi(cdev->pdev); if (!rc) { int_params->out.int_mode = QED_INT_MODE_MSI; goto out; } DP_NOTICE(cdev, "Failed to enable MSI\n"); if (force_mode) goto out; } fallthrough; case QED_INT_MODE_INTA: int_params->out.int_mode = QED_INT_MODE_INTA; rc = 0; goto out; default: DP_NOTICE(cdev, "Unknown int_mode value %d\n", int_params->in.int_mode); rc = -EINVAL; } out: if (!rc) DP_INFO(cdev, "Using %s interrupts\n", int_params->out.int_mode == QED_INT_MODE_INTA ? "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
"MSI" : "MSIX"); cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; return rc; } static void qed_simd_handler_config(struct qed_dev *cdev, void *token, int index, void(*handler)(void *)) { struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; int relative_idx = index / cdev->num_hwfns; hwfn->simd_proto_handler[relative_idx].func = handler; hwfn->simd_proto_handler[relative_idx].token = token; } static void qed_simd_handler_clean(struct qed_dev *cdev, int index) { struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; int relative_idx = index / cdev->num_hwfns; memset(&hwfn->simd_proto_handler[relative_idx], 0, sizeof(struct qed_simd_fp_handler)); } static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) { tasklet_schedule((struct tasklet_struct *)tasklet); return IRQ_HANDLED; } static irqreturn_t qed_single_int(int irq, void *dev_instance) { struct qed_dev *cdev = (struct qed_dev *)dev_instance; struct qed_hwfn *hwfn; irqreturn_t rc = IRQ_NONE; u64 status; int i, j; for (i = 0; i < cdev->num_hwfns; i++) { status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); if (!status) continue; hwfn = &cdev->hwfns[i]; /* Slowpath interrupt */ if (unlikely(status & 0x1)) { tasklet_schedule(&hwfn->sp_dpc); status &= ~0x1; rc = IRQ_HANDLED; } /* Fastpath interrupts */ for (j = 0; j < 64; j++) { if ((0x2ULL << j) & status) { struct qed_simd_fp_handler *p_handler = &hwfn->simd_proto_handler[j]; if (p_handler->func) p_handler->func(p_handler->token); else DP_NOTICE(hwfn, "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", j, status); status &= ~(0x2ULL << j); rc = IRQ_HANDLED; } } if (unlikely(status)) DP_VERBOSE(hwfn, NETIF_MSG_INTR, "got an unknown interrupt status 0x%llx\n", status); } return rc; } int qed_slowpath_irq_req(struct qed_hwfn *hwfn) { struct qed_dev *cdev = hwfn->cdev; u32 int_mode; int rc = 0; u8 id; int_mode = cdev->int_params.out.int_mode; if (int_mode == QED_INT_MODE_MSIX) { id = hwfn->my_id; snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", id, cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); rc = request_irq(cdev->int_params.msix_table[id].vector, qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc); } else { unsigned long flags = 0; snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), PCI_FUNC(cdev->pdev->devfn)); if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) flags |= IRQF_SHARED; rc = request_irq(cdev->pdev->irq, qed_single_int, flags, cdev->name, cdev); } if (rc) DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); else DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), "Requested slowpath %s\n", (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); return rc; } static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) { /* Calling the disable function will make sure that any * currently-running function is completed. The following call to the * enable function makes this sequence a flush-like operation. 
*/ if (p_hwfn->b_sp_dpc_enabled) { tasklet_disable(&p_hwfn->sp_dpc); tasklet_enable(&p_hwfn->sp_dpc); } } void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) { struct qed_dev *cdev = p_hwfn->cdev; u8 id = p_hwfn->my_id; u32 int_mode; int_mode = cdev->int_params.out.int_mode; if (int_mode == QED_INT_MODE_MSIX) synchronize_irq(cdev->int_params.msix_table[id].vector); else synchronize_irq(cdev->pdev->irq); qed_slowpath_tasklet_flush(p_hwfn); } static void qed_slowpath_irq_free(struct qed_dev *cdev) { int i; if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { for_each_hwfn(cdev, i) { if (!cdev->hwfns[i].b_int_requested) break; free_irq(cdev->int_params.msix_table[i].vector, &cdev->hwfns[i].sp_dpc); } } else { if (QED_LEADING_HWFN(cdev)->b_int_requested) free_irq(cdev->pdev->irq, cdev); } qed_int_disable_post_isr_release(cdev); } static int qed_nic_stop(struct qed_dev *cdev) { int i, rc; rc = qed_hw_stop(cdev); for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; if (p_hwfn->b_sp_dpc_enabled) { tasklet_disable(&p_hwfn->sp_dpc); p_hwfn->b_sp_dpc_enabled = false; DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, "Disabled sp tasklet [hwfn %d] at %p\n", i, &p_hwfn->sp_dpc); } } qed_dbg_pf_exit(cdev); return rc; } static int qed_nic_setup(struct qed_dev *cdev) { int rc, i; /* Determine if interface is going to require LL2 */ if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; p_hwfn->using_ll2 = true; } } rc = qed_resc_alloc(cdev); if (rc) return rc; DP_INFO(cdev, "Allocated qed resources\n"); qed_resc_setup(cdev); return rc; } static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) { int limit = 0; /* Mark the fastpath as free/used */ cdev->int_params.fp_initialized = cnt ? true : false; if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) limit = cdev->num_hwfns * 63; else if (cdev->int_params.fp_msix_cnt) limit = cdev->int_params.fp_msix_cnt; if (!limit) return -ENOMEM; return min_t(int, cnt, limit); } static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) { memset(info, 0, sizeof(struct qed_int_info)); if (!cdev->int_params.fp_initialized) { DP_INFO(cdev, "Protocol driver requested interrupt information, but its support is not yet configured\n"); return -EINVAL; } /* Need to expose only MSI-X information; Single IRQ is handled solely * by qed. 
*/ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { int msix_base = cdev->int_params.fp_msix_base; info->msix_cnt = cdev->int_params.fp_msix_cnt; info->msix = &cdev->int_params.msix_table[msix_base]; } return 0; } static int qed_slowpath_setup_int(struct qed_dev *cdev, enum qed_int_mode int_mode) { struct qed_sb_cnt_info sb_cnt_info; int num_l2_queues = 0; int rc; int i; if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); return -EINVAL; } memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); cdev->int_params.in.int_mode = int_mode; for_each_hwfn(cdev, i) { memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); cdev->int_params.in.num_vectors += sb_cnt_info.cnt; cdev->int_params.in.num_vectors++; /* slowpath */ } /* We want a minimum of one slowpath and one fastpath vector per hwfn */ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; if (is_kdump_kernel()) { DP_INFO(cdev, "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", cdev->int_params.in.min_msix_cnt); cdev->int_params.in.num_vectors = cdev->int_params.in.min_msix_cnt; } rc = qed_set_int_mode(cdev, false); if (rc) { DP_ERR(cdev, "%s ERR\n", __func__); return rc; } cdev->int_params.fp_msix_base = cdev->num_hwfns; cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - cdev->num_hwfns; if (!IS_ENABLED(CONFIG_QED_RDMA) || !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) return 0; for_each_hwfn(cdev, i) num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); DP_VERBOSE(cdev, QED_MSG_RDMA, "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", cdev->int_params.fp_msix_cnt, num_l2_queues); if (cdev->int_params.fp_msix_cnt > num_l2_queues) { cdev->int_params.rdma_msix_cnt = (cdev->int_params.fp_msix_cnt - num_l2_queues) / cdev->num_hwfns; cdev->int_params.rdma_msix_base = cdev->int_params.fp_msix_base + num_l2_queues; cdev->int_params.fp_msix_cnt = num_l2_queues; } else { cdev->int_params.rdma_msix_cnt = 0; } DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", cdev->int_params.rdma_msix_cnt, cdev->int_params.rdma_msix_base); return 0; } static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) { int rc; memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &cdev->int_params.in.num_vectors); if (cdev->num_hwfns > 1) { u8 vectors = 0; qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); cdev->int_params.in.num_vectors += vectors; } /* We want a minimum of one fastpath vector per vf hwfn */ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; rc = qed_set_int_mode(cdev, true); if (rc) return rc; cdev->int_params.fp_msix_base = 0; cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; return 0; } u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf) { int rc; p_hwfn->stream->next_in = input_buf; p_hwfn->stream->avail_in = input_len; p_hwfn->stream->next_out = unzip_buf; p_hwfn->stream->avail_out = max_size; rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); if (rc != Z_OK) { DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", rc); return 0; } rc = zlib_inflate(p_hwfn->stream, Z_FINISH); zlib_inflateEnd(p_hwfn->stream); if (rc != Z_OK && rc != Z_STREAM_END) { DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg, rc); return 0; } return 
p_hwfn->stream->total_out / 4; } static int qed_alloc_stream_mem(struct qed_dev *cdev) { int i; void *workspace; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); if (!p_hwfn->stream) return -ENOMEM; workspace = vzalloc(zlib_inflate_workspacesize()); if (!workspace) return -ENOMEM; p_hwfn->stream->workspace = workspace; } return 0; } static void qed_free_stream_mem(struct qed_dev *cdev) { int i; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; if (!p_hwfn->stream) return; vfree(p_hwfn->stream->workspace); kfree(p_hwfn->stream); } } static void qed_update_pf_params(struct qed_dev *cdev, struct qed_pf_params *params) { int i; if (IS_ENABLED(CONFIG_QED_RDMA)) { params->rdma_pf_params.num_qps = QED_ROCE_QPS; params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; /* divide by 3 the MRs to avoid MF ILT overflow */ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } if (cdev->num_hwfns > 1 || IS_VF(cdev)) params->eth_pf_params.num_arfs_filters = 0; /* In case we might support RDMA, don't allow qede to be greedy * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] * per hwfn. */ if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { u16 *num_cons; num_cons = &params->eth_pf_params.num_cons; *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); } for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; p_hwfn->pf_params = *params; } } #define QED_PERIODIC_DB_REC_COUNT 10 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 #define QED_PERIODIC_DB_REC_INTERVAL \ msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, enum qed_slowpath_wq_flag wq_flag, unsigned long delay) { if (!hwfn->slowpath_wq_active) return -EINVAL; /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(wq_flag, &hwfn->slowpath_task_flags); /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); return 0; } void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) { /* Reset periodic Doorbell Recovery counter */ p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; /* Don't schedule periodic Doorbell Recovery if already scheduled */ if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, &p_hwfn->slowpath_task_flags)) return; qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, QED_PERIODIC_DB_REC_INTERVAL); } static void qed_slowpath_wq_stop(struct qed_dev *cdev) { int i; if (IS_VF(cdev)) return; for_each_hwfn(cdev, i) { if (!cdev->hwfns[i].slowpath_wq) continue; /* Stop queuing new delayed works */ cdev->hwfns[i].slowpath_wq_active = false; cancel_delayed_work(&cdev->hwfns[i].slowpath_task); destroy_workqueue(cdev->hwfns[i].slowpath_wq); } } static void qed_slowpath_task(struct work_struct *work) { struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, slowpath_task.work); struct qed_ptt *ptt = qed_ptt_acquire(hwfn); if (!ptt) { if (hwfn->slowpath_wq_active) queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); return; } if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags)) qed_mfw_process_tlv_req(hwfn, ptt); if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, &hwfn->slowpath_task_flags)) { /* skip qed_db_rec_handler during recovery/unload */ if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active) goto out; qed_db_rec_handler(hwfn, ptt); if 
(hwfn->periodic_db_rec_count--) qed_slowpath_delayed_work(hwfn, QED_SLOWPATH_PERIODIC_DB_REC, QED_PERIODIC_DB_REC_INTERVAL); } out: qed_ptt_release(hwfn, ptt); } static int qed_slowpath_wq_start(struct qed_dev *cdev) { struct qed_hwfn *hwfn; char name[NAME_SIZE]; int i; if (IS_VF(cdev)) return 0; for_each_hwfn(cdev, i) { hwfn = &cdev->hwfns[i]; snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); if (!hwfn->slowpath_wq) { DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); return -ENOMEM; } INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); hwfn->slowpath_wq_active = true; } return 0; } static int qed_slowpath_start(struct qed_dev *cdev, struct qed_slowpath_params *params) { struct qed_drv_load_params drv_load_params; struct qed_hw_init_params hw_init_params; struct qed_mcp_drv_version drv_version; struct qed_tunnel_info tunn_info; const u8 *data = NULL; struct qed_hwfn *hwfn; struct qed_ptt *p_ptt; int rc = -EINVAL; if (qed_iov_wq_start(cdev)) goto err; if (qed_slowpath_wq_start(cdev)) goto err; if (IS_PF(cdev)) { rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, &cdev->pdev->dev); if (rc) { DP_NOTICE(cdev, "Failed to find fw file - /lib/firmware/%s\n", QED_FW_FILE_NAME); goto err; } if (cdev->num_hwfns == 1) { p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); if (p_ptt) { QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; } else { DP_NOTICE(cdev, "Failed to acquire PTT for aRFS\n"); rc = -EINVAL; goto err; } } } cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; rc = qed_nic_setup(cdev); if (rc) goto err; if (IS_PF(cdev)) rc = qed_slowpath_setup_int(cdev, params->int_mode); else rc = qed_slowpath_vf_setup_int(cdev); if (rc) goto err1; if (IS_PF(cdev)) { /* Allocate stream for unzipping */ rc = qed_alloc_stream_mem(cdev); if (rc) goto err2; /* First Dword used to differentiate between various sources */ data = cdev->firmware->data + sizeof(u32); qed_dbg_pf_init(cdev); } /* Start the slowpath */ memset(&hw_init_params, 0, sizeof(hw_init_params)); memset(&tunn_info, 0, sizeof(tunn_info)); tunn_info.vxlan.b_mode_enabled = true; tunn_info.l2_gre.b_mode_enabled = true; tunn_info.ip_gre.b_mode_enabled = true; tunn_info.l2_geneve.b_mode_enabled = true; tunn_info.ip_geneve.b_mode_enabled = true; tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; hw_init_params.p_tunn = &tunn_info; hw_init_params.b_hw_start = true; hw_init_params.int_mode = cdev->int_params.out.int_mode; hw_init_params.allow_npar_tx_switch = true; hw_init_params.bin_fw_data = data; memset(&drv_load_params, 0, sizeof(drv_load_params)); drv_load_params.is_crash_kernel = is_kdump_kernel(); drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; drv_load_params.avoid_eng_reset = false; drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; hw_init_params.p_drv_load_params = &drv_load_params; rc = qed_hw_init(cdev, &hw_init_params); if (rc) goto err2; DP_INFO(cdev, "HW initialization and function start completed successfully\n"); if (IS_PF(cdev)) { cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | BIT(QED_MODE_L2GENEVE_TUNN) | BIT(QED_MODE_IPGENEVE_TUNN) | BIT(QED_MODE_L2GRE_TUNN) | BIT(QED_MODE_IPGRE_TUNN)); } /* Allocate LL2 interface if needed */ if 
(QED_LEADING_HWFN(cdev)->using_ll2) { rc = qed_ll2_alloc_if(cdev); if (rc) goto err3; } if (IS_PF(cdev)) { hwfn = QED_LEADING_HWFN(cdev); drv_version.version = (params->drv_major << 24) | (params->drv_minor << 16) | (params->drv_rev << 8) | (params->drv_eng); strscpy(drv_version.name, params->name, MCP_DRV_VER_STR_SIZE - 4); rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, &drv_version); if (rc) { DP_NOTICE(cdev, "Failed sending drv version command\n"); goto err4; } } qed_reset_vport_stats(cdev); return 0; err4: qed_ll2_dealloc_if(cdev); err3: qed_hw_stop(cdev); err2: qed_hw_timers_stop_all(cdev); if (IS_PF(cdev)) qed_slowpath_irq_free(cdev); qed_free_stream_mem(cdev); qed_disable_msix(cdev); err1: qed_resc_free(cdev); err: if (IS_PF(cdev)) release_firmware(cdev->firmware); if (IS_PF(cdev) && (cdev->num_hwfns == 1) && QED_LEADING_HWFN(cdev)->p_arfs_ptt) qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); qed_iov_wq_stop(cdev, false); qed_slowpath_wq_stop(cdev); return rc; } static int qed_slowpath_stop(struct qed_dev *cdev) { if (!cdev) return -ENODEV; qed_slowpath_wq_stop(cdev); qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { if (cdev->num_hwfns == 1) qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); qed_free_stream_mem(cdev); if (IS_QED_ETH_IF(cdev)) qed_sriov_disable(cdev, true); } qed_nic_stop(cdev); if (IS_PF(cdev)) qed_slowpath_irq_free(cdev); qed_disable_msix(cdev); qed_resc_free(cdev); qed_iov_wq_stop(cdev, true); if (IS_PF(cdev)) release_firmware(cdev->firmware); return 0; } static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) { int i; memcpy(cdev->name, name, NAME_SIZE); for_each_hwfn(cdev, i) snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); } static u32 qed_sb_init(struct qed_dev *cdev, struct qed_sb_info *sb_info, void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id, enum qed_sb_type type) { struct qed_hwfn *p_hwfn; struct qed_ptt *p_ptt; u16 rel_sb_id; u32 rc; /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ if (type == QED_SB_TYPE_L2_QUEUE) { p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; rel_sb_id = sb_id / cdev->num_hwfns; } else { p_hwfn = QED_AFFIN_HWFN(cdev); rel_sb_id = sb_id; } DP_VERBOSE(cdev, NETIF_MSG_INTR, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); if (IS_PF(p_hwfn->cdev)) { p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EBUSY; rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, sb_phy_addr, rel_sb_id); qed_ptt_release(p_hwfn, p_ptt); } else { rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, sb_phy_addr, rel_sb_id); } return rc; } static u32 qed_sb_release(struct qed_dev *cdev, struct qed_sb_info *sb_info, u16 sb_id, enum qed_sb_type type) { struct qed_hwfn *p_hwfn; u16 rel_sb_id; u32 rc; /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ if (type == QED_SB_TYPE_L2_QUEUE) { p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; rel_sb_id = sb_id / cdev->num_hwfns; } else { p_hwfn = QED_AFFIN_HWFN(cdev); rel_sb_id = sb_id; } DP_VERBOSE(cdev, NETIF_MSG_INTR, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", IS_LEAD_HWFN(p_hwfn) ? 
0 : 1, rel_sb_id, sb_id); rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); return rc; } static bool qed_can_link_change(struct qed_dev *cdev) { return true; } static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params, const struct qed_link_params *params) { struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed; const struct qed_mfw_speed_map *map; u32 i; if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) ext_speed->autoneg = !!params->autoneg; if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { ext_speed->advertised_speeds = 0; for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) { map = qed_mfw_ext_maps + i; if (linkmode_intersects(params->adv_speeds, map->caps)) ext_speed->advertised_speeds |= map->mfw_val; } } if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) { switch (params->forced_speed) { case SPEED_1000: ext_speed->forced_speed = QED_EXT_SPEED_1G; break; case SPEED_10000: ext_speed->forced_speed = QED_EXT_SPEED_10G; break; case SPEED_20000: ext_speed->forced_speed = QED_EXT_SPEED_20G; break; case SPEED_25000: ext_speed->forced_speed = QED_EXT_SPEED_25G; break; case SPEED_40000: ext_speed->forced_speed = QED_EXT_SPEED_40G; break; case SPEED_50000: ext_speed->forced_speed = QED_EXT_SPEED_50G_R | QED_EXT_SPEED_50G_R2; break; case SPEED_100000: ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 | QED_EXT_SPEED_100G_R4 | QED_EXT_SPEED_100G_P4; break; default: break; } } if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)) return; switch (params->forced_speed) { case SPEED_25000: switch (params->fec) { case FEC_FORCE_MODE_NONE: link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE; break; case FEC_FORCE_MODE_FIRECODE: link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R; break; case FEC_FORCE_MODE_RS: link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528; break; case FEC_FORCE_MODE_AUTO: link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 | ETH_EXT_FEC_25G_BASE_R | ETH_EXT_FEC_25G_NONE; break; default: break; } break; case SPEED_40000: switch (params->fec) { case FEC_FORCE_MODE_NONE: link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE; break; case FEC_FORCE_MODE_FIRECODE: link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R; break; case FEC_FORCE_MODE_AUTO: link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R | ETH_EXT_FEC_40G_NONE; break; default: break; } break; case SPEED_50000: switch (params->fec) { case FEC_FORCE_MODE_NONE: link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE; break; case FEC_FORCE_MODE_FIRECODE: link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R; break; case FEC_FORCE_MODE_RS: link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528; break; case FEC_FORCE_MODE_AUTO: link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 | ETH_EXT_FEC_50G_BASE_R | ETH_EXT_FEC_50G_NONE; break; default: break; } break; case SPEED_100000: switch (params->fec) { case FEC_FORCE_MODE_NONE: link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE; break; case FEC_FORCE_MODE_FIRECODE: link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R; break; case FEC_FORCE_MODE_RS: link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528; break; case FEC_FORCE_MODE_AUTO: link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 | ETH_EXT_FEC_100G_BASE_R | ETH_EXT_FEC_100G_NONE; break; default: break; } break; default: break; } } static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { struct qed_mcp_link_params *link_params; struct qed_mcp_link_speed_params *speed; const struct qed_mfw_speed_map *map; struct qed_hwfn *hwfn; struct qed_ptt *ptt; int rc; 
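	/*
	 * Editor's note - illustrative sketch, not part of the original
	 * driver: callers of the .set_link op select which link attributes
	 * to touch through params->override_flags; everything else keeps
	 * its current MFW value.  A hypothetical caller forcing 25G with
	 * RS-FEC would pass roughly:
	 *
	 *	struct qed_link_params lp = {
	 *		.override_flags = QED_LINK_OVERRIDE_SPEED_FORCED_SPEED |
	 *				  QED_LINK_OVERRIDE_FEC_CONFIG,
	 *		.forced_speed	= SPEED_25000,
	 *		.fec		= FEC_FORCE_MODE_RS,
	 *		.link_up	= true,
	 *	};
	 *	qed_set_link(cdev, &lp);
	 *
	 * On an MFW with extended-speed support this reaches
	 * qed_set_ext_speed_params() above and selects ETH_EXT_FEC_25G_RS528.
	 */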
	u32 i;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possibly later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (!link_params)
		return -ENODATA;

	speed = &link_params->speed;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
			map = qed_mfw_legacy_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		speed->forced_speed = params->forced_speed;

	if (qed_mcp_is_ext_speed_supported(hwfn))
		qed_set_ext_speed_params(link_params, params);

	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
			break;
		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
		link_params->fec = params->fec;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_KR:
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct
qed_mcp_link_state *link, struct qed_mcp_link_capabilities *link_caps) { void *p; if (!IS_PF(hwfn->cdev)) { qed_vf_get_link_params(hwfn, params); qed_vf_get_link_state(hwfn, link); qed_vf_get_link_caps(hwfn, link_caps); return 0; } p = qed_mcp_get_link_params(hwfn); if (!p) return -ENXIO; memcpy(params, p, sizeof(*params)); p = qed_mcp_get_link_state(hwfn); if (!p) return -ENXIO; memcpy(link, p, sizeof(*link)); p = qed_mcp_get_link_capabilities(hwfn); if (!p) return -ENXIO; memcpy(link_caps, p, sizeof(*link_caps)); return 0; } static void qed_fill_link_capability(struct qed_hwfn *hwfn, struct qed_ptt *ptt, u32 capability, unsigned long *if_caps) { u32 media_type, tcvr_state, tcvr_type; u32 speed_mask, board_cfg; if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) media_type = MEDIA_UNSPECIFIED; if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) speed_mask = 0xFFFFFFFF; if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); switch (media_type) { case MEDIA_DA_TWINAX: phylink_set(if_caps, FIBRE); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) phylink_set(if_caps, 20000baseKR2_Full); /* For DAC media multiple speed capabilities are supported */ capability |= speed_mask; if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) phylink_set(if_caps, 1000baseKX_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) phylink_set(if_caps, 10000baseCR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_40G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: phylink_set(if_caps, 40000baseCR4_Full); break; default: break; } if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) phylink_set(if_caps, 25000baseCR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) phylink_set(if_caps, 50000baseCR2_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_100G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: phylink_set(if_caps, 100000baseCR4_Full); break; default: break; } break; case MEDIA_BASE_T: phylink_set(if_caps, TP); if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) phylink_set(if_caps, 1000baseT_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) phylink_set(if_caps, 10000baseT_Full); } if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { phylink_set(if_caps, FIBRE); switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_1000BASET: phylink_set(if_caps, 1000baseT_Full); break; case ETH_TRANSCEIVER_TYPE_10G_BASET: phylink_set(if_caps, 10000baseT_Full); break; default: break; } } break; case MEDIA_SFP_1G_FIBER: case MEDIA_SFPP_10G_FIBER: case MEDIA_XFP_FIBER: case MEDIA_MODULE_FIBER: phylink_set(if_caps, FIBRE); capability |= speed_mask; if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_1G_LX: case ETH_TRANSCEIVER_TYPE_1G_SX: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: phylink_set(if_caps, 1000baseKX_Full); break; default: break; } if 
(capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_10G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: phylink_set(if_caps, 10000baseSR_Full); break; case ETH_TRANSCEIVER_TYPE_10G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: phylink_set(if_caps, 10000baseLR_Full); break; case ETH_TRANSCEIVER_TYPE_10G_LRM: phylink_set(if_caps, 10000baseLRM_Full); break; case ETH_TRANSCEIVER_TYPE_10G_ER: phylink_set(if_caps, 10000baseR_FEC); break; default: break; } if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) phylink_set(if_caps, 20000baseKR2_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_25G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: phylink_set(if_caps, 25000baseSR_Full); break; default: break; } if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_40G_LR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: phylink_set(if_caps, 40000baseLR4_Full); break; case ETH_TRANSCEIVER_TYPE_40G_SR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: phylink_set(if_caps, 40000baseSR4_Full); break; default: break; } if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) phylink_set(if_caps, 50000baseKR2_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) switch (tcvr_type) { case ETH_TRANSCEIVER_TYPE_100G_SR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: phylink_set(if_caps, 100000baseSR4_Full); break; case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: phylink_set(if_caps, 100000baseLR4_ER4_Full); break; default: break; } break; case MEDIA_KR: phylink_set(if_caps, Backplane); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) phylink_set(if_caps, 20000baseKR2_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) phylink_set(if_caps, 1000baseKX_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) phylink_set(if_caps, 10000baseKR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) phylink_set(if_caps, 25000baseKR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) phylink_set(if_caps, 40000baseKR4_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) phylink_set(if_caps, 50000baseKR2_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) phylink_set(if_caps, 100000baseKR4_Full); break; case MEDIA_UNSPECIFIED: case MEDIA_NOT_PRESENT: default: DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, "Unknown media and transceiver type;\n"); break; } } static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask) { *speed_mask = 0; if (caps & (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD)) *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; if (caps & QED_LINK_PARTNER_SPEED_10G) *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; if (caps & QED_LINK_PARTNER_SPEED_20G) *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; if (caps & QED_LINK_PARTNER_SPEED_25G) *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; if (caps & QED_LINK_PARTNER_SPEED_40G) *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; if (caps & QED_LINK_PARTNER_SPEED_50G) *speed_mask 
|= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; if (caps & QED_LINK_PARTNER_SPEED_100G) *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; } static void qed_fill_link(struct qed_hwfn *hwfn, struct qed_ptt *ptt, struct qed_link_output *if_link) { struct qed_mcp_link_capabilities link_caps; struct qed_mcp_link_params params; struct qed_mcp_link_state link; u32 media_type, speed_mask; memset(if_link, 0, sizeof(*if_link)); /* Prepare source inputs */ if (qed_get_link_data(hwfn, &params, &link, &link_caps)) { dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); return; } /* Set the link parameters to pass to protocol driver */ if (link.link_up) if_link->link_up = true; if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) { if (link_caps.default_ext_autoneg) phylink_set(if_link->supported_caps, Autoneg); linkmode_copy(if_link->advertised_caps, if_link->supported_caps); if (params.ext_speed.autoneg) phylink_set(if_link->advertised_caps, Autoneg); else phylink_clear(if_link->advertised_caps, Autoneg); qed_fill_link_capability(hwfn, ptt, params.ext_speed.advertised_speeds, if_link->advertised_caps); } else { if (link_caps.default_speed_autoneg) phylink_set(if_link->supported_caps, Autoneg); linkmode_copy(if_link->advertised_caps, if_link->supported_caps); if (params.speed.autoneg) phylink_set(if_link->advertised_caps, Autoneg); else phylink_clear(if_link->advertised_caps, Autoneg); } if (params.pause.autoneg || (params.pause.forced_rx && params.pause.forced_tx)) phylink_set(if_link->supported_caps, Asym_Pause); if (params.pause.autoneg || params.pause.forced_rx || params.pause.forced_tx) phylink_set(if_link->supported_caps, Pause); if_link->sup_fec = link_caps.fec_default; if_link->active_fec = params.fec; /* Fill link advertised capability */ qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, if_link->advertised_caps); /* Fill link supported capability */ qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, if_link->supported_caps); /* Fill partner advertised capability */ qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask); qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps); if (link.link_up) if_link->speed = link.speed; /* TODO - fill duplex properly */ if_link->duplex = DUPLEX_FULL; qed_mcp_get_media_type(hwfn, ptt, &media_type); if_link->port = qed_get_port_type(media_type); if_link->autoneg = params.speed.autoneg; if (params.pause.autoneg) if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; if (params.pause.forced_rx) if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; if (params.pause.forced_tx) if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; if (link.an_complete) phylink_set(if_link->lp_caps, Autoneg); if (link.partner_adv_pause) phylink_set(if_link->lp_caps, Pause); if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) phylink_set(if_link->lp_caps, Asym_Pause); if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { if_link->eee_supported = false; } else { if_link->eee_supported = true; if_link->eee_active = link.eee_active; if_link->sup_caps = link_caps.eee_speed_caps; /* MFW clears adv_caps on eee disable; use configured value */ if_link->eee.adv_caps = link.eee_adv_caps ? 
link.eee_adv_caps : params.eee.adv_caps; if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; if_link->eee.enable = params.eee.enable; if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; } } static void qed_get_current_link(struct qed_dev *cdev, struct qed_link_output *if_link) { struct qed_hwfn *hwfn; struct qed_ptt *ptt; int i; hwfn = &cdev->hwfns[0]; if (IS_PF(cdev)) { ptt = qed_ptt_acquire(hwfn); if (ptt) { qed_fill_link(hwfn, ptt, if_link); qed_ptt_release(hwfn, ptt); } else { DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); } } else { qed_fill_link(hwfn, NULL, if_link); } for_each_hwfn(cdev, i) qed_inform_vf_link_state(&cdev->hwfns[i]); } void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) { void *cookie = hwfn->cdev->ops_cookie; struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; struct qed_link_output if_link; qed_fill_link(hwfn, ptt, &if_link); qed_inform_vf_link_state(hwfn); if (IS_LEAD_HWFN(hwfn) && cookie) op->link_update(cookie, &if_link); } void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) { void *cookie = hwfn->cdev->ops_cookie; struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) op->bw_update(cookie); } static int qed_drain(struct qed_dev *cdev) { struct qed_hwfn *hwfn; struct qed_ptt *ptt; int i, rc; if (IS_VF(cdev)) return 0; for_each_hwfn(cdev, i) { hwfn = &cdev->hwfns[i]; ptt = qed_ptt_acquire(hwfn); if (!ptt) { DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); return -EBUSY; } rc = qed_mcp_drain(hwfn, ptt); qed_ptt_release(hwfn, ptt); if (rc) return rc; } return 0; } static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, struct qed_nvm_image_att *nvm_image, u32 *crc) { u8 *buf = NULL; int rc; /* Allocate a buffer for holding the nvram image */ buf = kzalloc(nvm_image->length, GFP_KERNEL); if (!buf) return -ENOMEM; /* Read image into buffer */ rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, buf, nvm_image->length); if (rc) { DP_ERR(cdev, "Failed reading image from nvm\n"); goto out; } /* Convert the buffer into big-endian format (excluding the * closing 4 bytes of CRC). */ cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, DIV_ROUND_UP(nvm_image->length - 4, 4)); /* Calc CRC for the "actual" image buffer, i.e. not including * the last 4 CRC bytes. */ *crc = ~crc32(~0U, buf, nvm_image->length - 4); *crc = (__force u32)cpu_to_be32p(crc); out: kfree(buf); return rc; } /* Binary file format - * /----------------------------------------------------------------------\ * 0B | 0x4 [command index] | * 4B | image_type | Options | Number of register settings | * 8B | Value | * 12B | Mask | * 16B | Offset | * \----------------------------------------------------------------------/ * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 
* Options - 0'b - Calculate & Update CRC for image */ static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, bool *check_resp) { struct qed_nvm_image_att nvm_image; struct qed_hwfn *p_hwfn; bool is_crc = false; u32 image_type; int rc = 0, i; u16 len; *data += 4; image_type = **data; p_hwfn = QED_LEADING_HWFN(cdev); for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (image_type == p_hwfn->nvm_info.image_att[i].image_type) break; if (i == p_hwfn->nvm_info.num_images) { DP_ERR(cdev, "Failed to find nvram image of type %08x\n", image_type); return -ENOENT; } nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; nvm_image.length = p_hwfn->nvm_info.image_att[i].len; DP_VERBOSE(cdev, NETIF_MSG_DRV, "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", **data, image_type, nvm_image.start_addr, nvm_image.start_addr + nvm_image.length - 1); (*data)++; is_crc = !!(**data & BIT(0)); (*data)++; len = *((u16 *)*data); *data += 2; if (is_crc) { u32 crc = 0; rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); if (rc) { DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); goto exit; } rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, (nvm_image.start_addr + nvm_image.length - 4), (u8 *)&crc, 4); if (rc) DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", nvm_image.start_addr + nvm_image.length - 4, rc); goto exit; } /* Iterate over the values for setting */ while (len) { u32 offset, mask, value, cur_value; u8 buf[4]; value = *((u32 *)*data); *data += 4; mask = *((u32 *)*data); *data += 4; offset = *((u32 *)*data); *data += 4; rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 4); if (rc) { DP_ERR(cdev, "Failed reading from %08x\n", nvm_image.start_addr + offset); goto exit; } cur_value = le32_to_cpu(*((__le32 *)buf)); DP_VERBOSE(cdev, NETIF_MSG_DRV, "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", nvm_image.start_addr + offset, cur_value, (cur_value & ~mask) | (value & mask), value, mask); value = (value & mask) | (cur_value & ~mask); rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, nvm_image.start_addr + offset, (u8 *)&value, 4); if (rc) { DP_ERR(cdev, "Failed writing to %08x\n", nvm_image.start_addr + offset); goto exit; } len--; } exit: return rc; } /* Binary file format - * /----------------------------------------------------------------------\ * 0B | 0x3 [command index] | * 4B | b'0: check_response? | b'1-31 reserved | * 8B | File-type | reserved | * 12B | Image length in bytes | * \----------------------------------------------------------------------/ * Start a new file of the provided type */ static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, const u8 **data, bool *check_resp) { u32 file_type, file_size = 0; int rc; *data += 4; *check_resp = !!(**data & BIT(0)); *data += 4; file_type = **data; DP_VERBOSE(cdev, NETIF_MSG_DRV, "About to start a new file of type %02x\n", file_type); if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { *data += 4; file_size = *((u32 *)(*data)); } rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, (u8 *)(&file_size), 4); *data += 4; return rc; } /* Binary file format - * /----------------------------------------------------------------------\ * 0B | 0x2 [command index] | * 4B | Length in bytes | * 8B | b'0: check_response? | b'1-31 reserved | * 12B | Offset in bytes | * 16B | Data ... | * \----------------------------------------------------------------------/ * Write data as part of a file that was previously started. 
Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}

/* Binary file format [General header] -
 * /----------------------------------------------------------------------\
 * 0B | QED_NVM_SIGNATURE |
 * 4B | Length in bytes |
 * 8B | Highest command in this batchfile | Reserved |
 * \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;

	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;

	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B | 0x5 [command index] |
 * 4B | Number of config attributes | Reserved |
 * 4B | Config ID | Entity ID | Length |
 * 4B | Value |
 * | |
 * \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Read config ids: num_attrs = %0d\n",
		   count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
*/ for (i = 1; i <= count; i++) { cfg_id = *((u16 *)*data); *data += 2; entity_id = **data; (*data)++; len = **data; (*data)++; memcpy(buf, *data, len); *data += len; flags = 0; if (need_nvm_init) { flags |= QED_NVM_CFG_OPTION_INIT; need_nvm_init = false; } /* Commit to flash and free the resources */ if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { flags |= QED_NVM_CFG_OPTION_COMMIT | QED_NVM_CFG_OPTION_FREE; need_nvm_init = true; } if (entity_id) flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; DP_VERBOSE(cdev, NETIF_MSG_DRV, "cfg_id = %d entity = %d len = %d\n", cfg_id, entity_id, len); rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags, buf, len); if (rc) { DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id); break; } } qed_ptt_release(hwfn, ptt); return rc; } #define QED_MAX_NVM_BUF_LEN 32 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); u8 buf[QED_MAX_NVM_BUF_LEN]; struct qed_ptt *ptt; u32 len; int rc; ptt = qed_ptt_acquire(hwfn); if (!ptt) return QED_MAX_NVM_BUF_LEN; rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf, &len); if (rc || !len) { DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); len = QED_MAX_NVM_BUF_LEN; } qed_ptt_release(hwfn, ptt); return len; } static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, u32 cmd, u32 entity_id) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; u32 flags, len; int rc = 0; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EAGAIN; DP_VERBOSE(cdev, NETIF_MSG_DRV, "Read config cmd = %d entity id %d\n", cmd, entity_id); flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); if (rc) DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); qed_ptt_release(hwfn, ptt); return rc; } static int qed_nvm_flash(struct qed_dev *cdev, const char *name) { const struct firmware *image; const u8 *data, *data_end; u32 cmd_type; int rc; rc = request_firmware(&image, name, &cdev->pdev->dev); if (rc) { DP_ERR(cdev, "Failed to find '%s'\n", name); return rc; } DP_VERBOSE(cdev, NETIF_MSG_DRV, "Flashing '%s' - firmware's data at %p, size is %08x\n", name, image->data, (u32)image->size); data = image->data; data_end = data + image->size; rc = qed_nvm_flash_image_validate(cdev, image, &data); if (rc) goto exit; while (data < data_end) { bool check_resp = false; /* Parse the actual command */ cmd_type = *((u32 *)data); switch (cmd_type) { case QED_NVM_FLASH_CMD_FILE_DATA: rc = qed_nvm_flash_image_file_data(cdev, &data, &check_resp); break; case QED_NVM_FLASH_CMD_FILE_START: rc = qed_nvm_flash_image_file_start(cdev, &data, &check_resp); break; case QED_NVM_FLASH_CMD_NVM_CHANGE: rc = qed_nvm_flash_image_access(cdev, &data, &check_resp); break; case QED_NVM_FLASH_CMD_NVM_CFG_ID: rc = qed_nvm_flash_cfg_write(cdev, &data); break; default: DP_ERR(cdev, "Unknown command %08x\n", cmd_type); rc = -EINVAL; goto exit; } if (rc) { DP_ERR(cdev, "Command %08x failed\n", cmd_type); goto exit; } /* Check response if needed */ if (check_resp) { u32 mcp_response = 0; if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { DP_ERR(cdev, "Failed getting MCP response\n"); rc = -EINVAL; goto exit; } switch (mcp_response & FW_MSG_CODE_MASK) { case FW_MSG_CODE_OK: case FW_MSG_CODE_NVM_OK: case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: case FW_MSG_CODE_PHY_OK: break; default: DP_ERR(cdev, "MFW returns error: %08x\n", mcp_response); rc = -EINVAL; goto exit; } } } exit: release_firmware(image); return rc; } static 
int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, u8 *buf, u16 len) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); return qed_mcp_get_nvm_image(hwfn, type, buf, len); } void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) { struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; void *cookie = p_hwfn->cdev->ops_cookie; if (ops && ops->schedule_recovery_handler) ops->schedule_recovery_handler(cookie); } static const char * const qed_hw_err_type_descr[] = { [QED_HW_ERR_FAN_FAIL] = "Fan Failure", [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", [QED_HW_ERR_HW_ATTN] = "HW Attention", [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure", [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure", [QED_HW_ERR_FW_ASSERT] = "FW Assertion", [QED_HW_ERR_LAST] = "Unknown", }; void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, enum qed_hw_err_type err_type) { struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; void *cookie = p_hwfn->cdev->ops_cookie; const char *err_str; if (err_type > QED_HW_ERR_LAST) err_type = QED_HW_ERR_LAST; err_str = qed_hw_err_type_descr[err_type]; DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str); /* Call the HW error handler of the protocol driver. * If it is not available - perform a minimal handling of preventing * HW attentions from being reasserted. */ if (ops && ops->schedule_hw_err_handler) ops->schedule_hw_err_handler(cookie, err_type); else qed_int_attn_clr_enable(p_hwfn->cdev, true); } static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, void *handle) { return qed_set_queue_coalesce(rx_coal, tx_coal, handle); } static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; int status = 0; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EAGAIN; status = qed_mcp_set_led(hwfn, ptt, mode); qed_ptt_release(hwfn, ptt); return status; } int qed_recovery_process(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt; int rc = 0; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EAGAIN; rc = qed_start_recovery_process(p_hwfn, p_ptt); qed_ptt_release(p_hwfn, p_ptt); return rc; } static int qed_update_wol(struct qed_dev *cdev, bool enabled) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; int rc = 0; if (IS_VF(cdev)) return 0; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EAGAIN; rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED : QED_OV_WOL_DISABLED); if (rc) goto out; rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); out: qed_ptt_release(hwfn, ptt); return rc; } static int qed_update_drv_state(struct qed_dev *cdev, bool active) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; int status = 0; if (IS_VF(cdev)) return 0; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EAGAIN; status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
QED_OV_DRIVER_STATE_ACTIVE : QED_OV_DRIVER_STATE_DISABLED); qed_ptt_release(hwfn, ptt); return status; } static int qed_update_mac(struct qed_dev *cdev, const u8 *mac) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; int status = 0; if (IS_VF(cdev)) return 0; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EAGAIN; status = qed_mcp_ov_update_mac(hwfn, ptt, mac); if (status) goto out; status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); out: qed_ptt_release(hwfn, ptt); return status; } static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; int status = 0; if (IS_VF(cdev)) return 0; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EAGAIN; status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); if (status) goto out; status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); out: qed_ptt_release(hwfn, ptt); return status; } static int qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb, u16 qid, struct qed_sb_info_dbg *sb_dbg) { struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns]; struct qed_ptt *ptt; int rc; if (IS_VF(cdev)) return -EINVAL; ptt = qed_ptt_acquire(hwfn); if (!ptt) { DP_NOTICE(hwfn, "Can't acquire PTT\n"); return -EAGAIN; } memset(sb_dbg, 0, sizeof(*sb_dbg)); rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg); qed_ptt_release(hwfn, ptt); return rc; } static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, u8 dev_addr, u32 offset, u32 len) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; int rc = 0; if (IS_VF(cdev)) return 0; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EAGAIN; rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, offset, len, buf); qed_ptt_release(hwfn, ptt); return rc; } static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; int rc = 0; if (IS_VF(cdev)) return 0; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EAGAIN; rc = qed_dbg_grc_config(hwfn, cfg_id, val); qed_ptt_release(hwfn, ptt); return rc; } static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...) 
{ char buf[QED_MFW_REPORT_STR_SIZE]; struct qed_hwfn *p_hwfn; struct qed_ptt *p_ptt; va_list vl; va_start(vl, fmt); vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl); va_end(vl); if (IS_PF(cdev)) { p_hwfn = QED_LEADING_HWFN(cdev); p_ptt = qed_ptt_acquire(p_hwfn); if (p_ptt) { qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf)); qed_ptt_release(p_hwfn, p_ptt); } } } static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) { return QED_AFFIN_HWFN_IDX(cdev); } static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; int rc = 0; *esl_active = false; if (IS_VF(cdev)) return 0; ptt = qed_ptt_acquire(hwfn); if (!ptt) return -EAGAIN; rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active); qed_ptt_release(hwfn, ptt); return rc; } static struct qed_selftest_ops qed_selftest_ops_pass = { .selftest_memory = &qed_selftest_memory, .selftest_interrupt = &qed_selftest_interrupt, .selftest_register = &qed_selftest_register, .selftest_clock = &qed_selftest_clock, .selftest_nvram = &qed_selftest_nvram, }; const struct qed_common_ops qed_common_ops_pass = { .selftest = &qed_selftest_ops_pass, .probe = &qed_probe, .remove = &qed_remove, .set_power_state = &qed_set_power_state, .set_name = &qed_set_name, .update_pf_params = &qed_update_pf_params, .slowpath_start = &qed_slowpath_start, .slowpath_stop = &qed_slowpath_stop, .set_fp_int = &qed_set_int_fp, .get_fp_int = &qed_get_int_fp, .sb_init = &qed_sb_init, .sb_release = &qed_sb_release, .simd_handler_config = &qed_simd_handler_config, .simd_handler_clean = &qed_simd_handler_clean, .dbg_grc = &qed_dbg_grc, .dbg_grc_size = &qed_dbg_grc_size, .can_link_change = &qed_can_link_change, .set_link = &qed_set_link, .get_link = &qed_get_current_link, .drain = &qed_drain, .update_msglvl = &qed_init_dp, .devlink_register = qed_devlink_register, .devlink_unregister = qed_devlink_unregister, .report_fatal_error = qed_report_fatal_error, .dbg_all_data = &qed_dbg_all_data, .dbg_all_data_size = &qed_dbg_all_data_size, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, .nvm_flash = &qed_nvm_flash, .nvm_get_image = &qed_nvm_get_image, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, .recovery_process = &qed_recovery_process, .recovery_prolog = &qed_recovery_prolog, .attn_clr_enable = &qed_int_attn_clr_enable, .update_drv_state = &qed_update_drv_state, .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, .update_wol = &qed_update_wol, .db_recovery_add = &qed_db_recovery_add, .db_recovery_del = &qed_db_recovery_del, .read_module_eeprom = &qed_read_module_eeprom, .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, .read_nvm_cfg = &qed_nvm_flash_cfg_read, .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, .set_grc_config = &qed_set_grc_config, .mfw_report = &qed_mfw_report, .get_sb_info = &qed_get_sb_info, .get_esl_status = &qed_get_esl_status, }; void qed_get_protocol_stats(struct qed_dev *cdev, enum qed_mcp_protocol_type type, union qed_mcp_protocol_stats *stats) { struct qed_eth_stats eth_stats; memset(stats, 0, sizeof(*stats)); switch (type) { case QED_MCP_LAN_STATS: qed_get_vport_stats_context(cdev, &eth_stats, true); stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; stats->lan_stats.fcs_err = -1; break; case QED_MCP_FCOE_STATS: qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true); break; case QED_MCP_ISCSI_STATS: qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true); break; 
default: DP_VERBOSE(cdev, QED_MSG_SP, "Invalid protocol type = %d\n", type); return; } } int qed_mfw_tlv_req(struct qed_hwfn *hwfn) { DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, "Scheduling slowpath task [Flag: %d]\n", QED_SLOWPATH_MFW_TLV_REQ); /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); return 0; } static void qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) { struct qed_common_cb_ops *op = cdev->protocol_ops.common; struct qed_eth_stats_common *p_common; struct qed_generic_tlvs gen_tlvs; struct qed_eth_stats stats; int i; memset(&gen_tlvs, 0, sizeof(gen_tlvs)); op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) tlv->flags.ipv4_csum_offload = true; if (gen_tlvs.feat_flags & QED_TLV_LSO) tlv->flags.lso_supported = true; tlv->flags.b_set = true; for (i = 0; i < QED_TLV_MAC_COUNT; i++) { if (is_valid_ether_addr(gen_tlvs.mac[i])) { ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); tlv->mac_set[i] = true; } } qed_get_vport_stats(cdev, &stats); p_common = &stats.common; tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + p_common->rx_bcast_pkts; tlv->rx_frames_set = true; tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + p_common->rx_bcast_bytes; tlv->rx_bytes_set = true; tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + p_common->tx_bcast_pkts; tlv->tx_frames_set = true; tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + p_common->tx_bcast_bytes; tlv->rx_bytes_set = true; } int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, union qed_mfw_tlv_data *tlv_buf) { struct qed_dev *cdev = hwfn->cdev; struct qed_common_cb_ops *ops; ops = cdev->protocol_ops.common; if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { DP_NOTICE(hwfn, "Can't collect TLV management info\n"); return -EINVAL; } switch (type) { case QED_MFW_TLV_GENERIC: qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); break; case QED_MFW_TLV_ETH: ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); break; case QED_MFW_TLV_FCOE: ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); break; case QED_MFW_TLV_ISCSI: ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); break; default: break; } return 0; } unsigned long qed_get_epoch_time(void) { return ktime_get_real_seconds(); }
linux-master
drivers/net/ethernet/qlogic/qed/qed_main.c
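A minimal sketch in plain userspace C (not part of the driver) of the pattern that most of the qed_common_ops callbacks above share: acquire a PTT window, run the management-firmware operation, release the window on every path, and return -EAGAIN when no window is free. Every identifier prefixed with fake_ is a made-up stand-in, not a qed API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* All fake_* names below are made-up stand-ins, not qed APIs. */
struct fake_ptt {
	int idx;			/* stand-in for struct qed_ptt */
};

static struct fake_ptt *fake_ptt_acquire(void)
{
	struct fake_ptt *p = malloc(sizeof(*p));	/* pretend pool lookup */

	if (p)
		p->idx = 0;
	return p;
}

static void fake_ptt_release(struct fake_ptt *p)
{
	free(p);			/* hand the window back to the pool */
}

static int fake_mcp_op(struct fake_ptt *p, int arg)
{
	printf("mcp op via ptt %d, arg %d\n", p->idx, arg);
	return 0;			/* 0 on success, -errno on failure */
}

/* Shape of a typical callback such as qed_set_led() or qed_update_mtu(). */
static int fake_callback(int arg)
{
	struct fake_ptt *ptt = fake_ptt_acquire();
	int rc;

	if (!ptt)
		return -EAGAIN;		/* no free window, caller may retry */

	rc = fake_mcp_op(ptt, arg);
	fake_ptt_release(ptt);		/* released on every path */
	return rc;
}

int main(void)
{
	return fake_callback(42) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The same shape repeats in qed_set_led(), qed_update_wol(), qed_update_mtu() and the other wrappers above; only the MCP call in the middle changes.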
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/crc32.h> #include "qed.h" #include "qed_dev_api.h" #include "qed_mcp.h" #include "qed_sp.h" #include "qed_selftest.h" int qed_selftest_memory(struct qed_dev *cdev) { int rc = 0, i; for_each_hwfn(cdev, i) { rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]); if (rc) return rc; } return rc; } int qed_selftest_interrupt(struct qed_dev *cdev) { int rc = 0, i; for_each_hwfn(cdev, i) { rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]); if (rc) return rc; } return rc; } int qed_selftest_register(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn; struct qed_ptt *p_ptt; int rc = 0, i; /* although performed by MCP, this test is per engine */ for_each_hwfn(cdev, i) { p_hwfn = &cdev->hwfns[i]; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_ERR(p_hwfn, "failed to acquire ptt\n"); return -EBUSY; } rc = qed_mcp_bist_register_test(p_hwfn, p_ptt); qed_ptt_release(p_hwfn, p_ptt); if (rc) break; } return rc; } int qed_selftest_clock(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn; struct qed_ptt *p_ptt; int rc = 0, i; /* although performed by MCP, this test is per engine */ for_each_hwfn(cdev, i) { p_hwfn = &cdev->hwfns[i]; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_ERR(p_hwfn, "failed to acquire ptt\n"); return -EBUSY; } rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt); qed_ptt_release(p_hwfn, p_ptt); if (rc) break; } return rc; } int qed_selftest_nvram(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u32 num_images, i, j, nvm_crc, calc_crc; struct bist_nvm_image_att image_att; u8 *buf = NULL; __be32 val; int rc; if (!p_ptt) { DP_ERR(p_hwfn, "failed to acquire ptt\n"); return -EBUSY; } /* Acquire from MFW the amount of available images */ rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, p_ptt, &num_images); if (rc || !num_images) { DP_ERR(p_hwfn, "Failed getting number of images\n"); rc = -EINVAL; goto err0; } /* Iterate over images and validate CRC */ for (i = 0; i < num_images; i++) { /* This mailbox returns information about the image required for * reading it. */ rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, &image_att, i); if (rc) { DP_ERR(p_hwfn, "Failed getting image index %d attributes\n", i); goto err0; } /* After MFW crash dump is collected - the image's CRC stops * being valid. */ if (image_att.image_type == NVM_TYPE_MDUMP) continue; DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, image_att.len); /* Allocate a buffer for holding the nvram image */ buf = kzalloc(image_att.len, GFP_KERNEL); if (!buf) { rc = -ENOMEM; goto err0; } /* Read image into buffer */ rc = qed_mcp_nvm_read(p_hwfn->cdev, image_att.nvm_start_addr, buf, image_att.len); if (rc) { DP_ERR(p_hwfn, "Failed reading image index %d from nvm.\n", i); goto err1; } /* Convert the buffer into big-endian format (excluding the * closing 4 bytes of CRC). */ for (j = 0; j < image_att.len - 4; j += 4) { val = cpu_to_be32(*(u32 *)&buf[j]); *(u32 *)&buf[j] = (__force u32)val; } /* Calc CRC for the "actual" image buffer, i.e. not including * the last 4 CRC bytes. 
*/ nvm_crc = *(u32 *)(buf + image_att.len - 4); calc_crc = crc32(0xffffffff, buf, image_att.len - 4); calc_crc = (__force u32)~cpu_to_be32(calc_crc); DP_VERBOSE(p_hwfn, QED_MSG_SP, "nvm crc 0x%x, calc_crc 0x%x\n", nvm_crc, calc_crc); if (calc_crc != nvm_crc) { rc = -EINVAL; goto err1; } /* Done with this image; Free to prevent double release * on subsequent failure. */ kfree(buf); buf = NULL; } qed_ptt_release(p_hwfn, p_ptt); return 0; err1: kfree(buf); err0: qed_ptt_release(p_hwfn, p_ptt); return rc; }
linux-master
drivers/net/ethernet/qlogic/qed/qed_selftest.c
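A standalone sketch (plain C, assuming a generic bit-wise CRC-32 with polynomial 0xEDB88320 rather than the kernel's crc32() helper) of the two mechanical steps in qed_selftest_nvram() above: byte-swap every 32-bit payload word to big-endian while leaving the trailing 4 CRC bytes untouched, then CRC the swapped payload and compare against the stored value. The driver applies its own swap-and-complement finalisation to the computed CRC; check_image() and the example image below are illustrative only.

#include <arpa/inet.h>		/* htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Plain bit-wise CRC-32 (poly 0xEDB88320); illustrative, not the kernel helper. */
static uint32_t crc32_bitwise(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;

	for (size_t i = 0; i < len; i++) {
		crc ^= buf[i];
		for (int b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return ~crc;
}

/* Returns 0 when the CRC stored in the last 4 bytes matches the payload. */
static int check_image(uint8_t *img, size_t len)
{
	uint32_t stored, calc;

	if (len < 8 || (len % 4))
		return -1;

	/* Swap the payload words to big-endian in place; skip the CRC word. */
	for (size_t off = 0; off < len - 4; off += 4) {
		uint32_t word;

		memcpy(&word, img + off, 4);
		word = htonl(word);
		memcpy(img + off, &word, 4);
	}

	memcpy(&stored, img + len - 4, 4);
	calc = crc32_bitwise(img, len - 4);
	printf("stored 0x%08x, calculated 0x%08x\n", stored, calc);
	return calc == stored ? 0 : -1;
}

int main(void)
{
	uint8_t img[12] = { 1, 2, 3, 4, 5, 6, 7, 8 };	/* 8-byte payload + CRC slot */
	uint8_t swapped[8];
	uint32_t crc;

	/* Stamp the image with the CRC of its big-endian payload, then verify. */
	for (size_t off = 0; off < 8; off += 4) {
		uint32_t word;

		memcpy(&word, img + off, 4);
		word = htonl(word);
		memcpy(swapped + off, &word, 4);
	}
	crc = crc32_bitwise(swapped, sizeof(swapped));
	memcpy(img + 8, &crc, 4);

	return check_image(img, sizeof(img)) ? 1 : 0;
}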
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/qed/qed_chain.h> #include "qed.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_reg_addr.h" #include "qed_sriov.h" #define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000 #define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000 #define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000 #define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10 /* Invalid values */ #define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1)) struct qed_ptt { struct list_head list_entry; unsigned int idx; struct pxp_ptt_entry pxp; u8 hwfn_id; }; struct qed_ptt_pool { struct list_head free_list; spinlock_t lock; /* ptt synchronized access */ struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM]; }; int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) { struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL); int i; if (!p_pool) return -ENOMEM; INIT_LIST_HEAD(&p_pool->free_list); for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { p_pool->ptts[i].idx = i; p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET; p_pool->ptts[i].pxp.pretend.control = 0; p_pool->ptts[i].hwfn_id = p_hwfn->my_id; if (i >= RESERVED_PTT_MAX) list_add(&p_pool->ptts[i].list_entry, &p_pool->free_list); } p_hwfn->p_ptt_pool = p_pool; spin_lock_init(&p_pool->lock); return 0; } void qed_ptt_invalidate(struct qed_hwfn *p_hwfn) { struct qed_ptt *p_ptt; int i; for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { p_ptt = &p_hwfn->p_ptt_pool->ptts[i]; p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET; } } void qed_ptt_pool_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->p_ptt_pool); p_hwfn->p_ptt_pool = NULL; } struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) { return qed_ptt_acquire_context(p_hwfn, false); } struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic) { struct qed_ptt *p_ptt; unsigned int i, count; if (is_atomic) count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT; else count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT; /* Take the free PTT from the list */ for (i = 0; i < count; i++) { spin_lock_bh(&p_hwfn->p_ptt_pool->lock); if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) { p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list, struct qed_ptt, list_entry); list_del(&p_ptt->list_entry); spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "allocated ptt %d\n", p_ptt->idx); return p_ptt; } spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); if (is_atomic) udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY); else usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP, QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2); } DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n"); return NULL; } void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { spin_lock_bh(&p_hwfn->p_ptt_pool->lock); list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list); spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); } u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { /* The HW is using DWORDS and we need to translate it to Bytes */ return le32_to_cpu(p_ptt->pxp.offset) << 2; } static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt) { return PXP_PF_WINDOW_ADMIN_PER_PF_START 
+ p_ptt->idx * sizeof(struct pxp_ptt_entry); } u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt) { return PXP_EXTERNAL_BAR_PF_WINDOW_START + p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE; } void qed_ptt_set_win(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 new_hw_addr) { u32 prev_hw_addr; prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); if (new_hw_addr == prev_hw_addr) return; /* Update PTT entery in admin window */ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Updating PTT entry %d to offset 0x%x\n", p_ptt->idx, new_hw_addr); /* The HW is using DWORDS and the address is in Bytes */ p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2); REG_WR(p_hwfn, qed_ptt_config_addr(p_ptt) + offsetof(struct pxp_ptt_entry, offset), le32_to_cpu(p_ptt->pxp.offset)); } static u32 qed_set_ptt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr) { u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); u32 offset; offset = hw_addr - win_hw_addr; if (p_ptt->hwfn_id != p_hwfn->my_id) DP_NOTICE(p_hwfn, "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n", p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id); /* Verify the address is within the window */ if (hw_addr < win_hw_addr || offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) { qed_ptt_set_win(p_hwfn, p_ptt, hw_addr); offset = 0; } return qed_ptt_get_bar_addr(p_ptt) + offset; } struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, enum reserved_ptts ptt_idx) { if (ptt_idx >= RESERVED_PTT_MAX) { DP_NOTICE(p_hwfn, "Requested PTT %d is out of range\n", ptt_idx); return NULL; } return &p_hwfn->p_ptt_pool->ptts[ptt_idx]; } void qed_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr, u32 val) { u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr); REG_WR(p_hwfn, bar_addr, val); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", bar_addr, hw_addr, val); } u32 qed_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr) { u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr); u32 val = REG_RD(p_hwfn, bar_addr); DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", bar_addr, hw_addr, val); return val; } static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, void *addr, u32 hw_addr, size_t n, bool to_device) { u32 dw_count, *host_addr, hw_offset; size_t quota, done = 0; u32 __iomem *reg_addr; while (done < n) { quota = min_t(size_t, n - done, PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE); if (IS_PF(p_hwfn->cdev)) { qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); hw_offset = qed_ptt_get_bar_addr(p_ptt); } else { hw_offset = hw_addr + done; } dw_count = quota / 4; host_addr = (u32 *)((u8 *)addr + done); reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset); if (to_device) while (dw_count--) DIRECT_REG_WR(reg_addr++, *host_addr++); else while (dw_count--) *host_addr++ = DIRECT_REG_RD(reg_addr++); done += quota; } } void qed_memcpy_from(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n", hw_addr, dest, hw_addr, (unsigned long)n); qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false); } void qed_memcpy_to(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n", hw_addr, hw_addr, src, (unsigned long)n); qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true); } void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid) { u16 control = 0; SET_FIELD(control, 
PXP_PRETEND_CMD_IS_CONCRETE, 1); SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); /* Every pretend undos previous pretends, including * previous port pretend. */ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); p_ptt->pxp.pretend.control = cpu_to_le16(control); p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid); REG_WR(p_hwfn, qed_ptt_config_addr(p_ptt) + offsetof(struct pxp_ptt_entry, pretend), *(u32 *)&p_ptt->pxp.pretend); } void qed_port_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id) { u16 control = 0; SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); p_ptt->pxp.pretend.control = cpu_to_le16(control); REG_WR(p_hwfn, qed_ptt_config_addr(p_ptt) + offsetof(struct pxp_ptt_entry, pretend), *(u32 *)&p_ptt->pxp.pretend); } void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u16 control = 0; SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); p_ptt->pxp.pretend.control = cpu_to_le16(control); REG_WR(p_hwfn, qed_ptt_config_addr(p_ptt) + offsetof(struct pxp_ptt_entry, pretend), *(u32 *)&p_ptt->pxp.pretend); } void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id, u16 fid) { u16 control = 0; SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); p_ptt->pxp.pretend.control = cpu_to_le16(control); p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid); REG_WR(p_hwfn, qed_ptt_config_addr(p_ptt) + offsetof(struct pxp_ptt_entry, pretend), *(u32 *)&p_ptt->pxp.pretend); } u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid) { u32 concrete_fid = 0; SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id); SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid); SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1); return concrete_fid; } /* DMAE */ #define QED_DMAE_FLAGS_IS_SET(params, flag) \ ((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag)) static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, const u8 is_src_type_grc, const u8 is_dst_type_grc, struct qed_dmae_params *p_params) { u8 src_pfid, dst_pfid, port_id; u16 opcode_b = 0; u32 opcode = 0; /* Whether the source is the PCIe or the GRC. * 0- The source is the PCIe * 1- The source is the GRC. */ SET_FIELD(opcode, DMAE_CMD_SRC, (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie)); src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ? p_params->src_pfid : p_hwfn->rel_pf_id; SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid); /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */ SET_FIELD(opcode, DMAE_CMD_DST, (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie)); dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ? 
p_params->dst_pfid : p_hwfn->rel_pf_id; SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid); /* Whether to write a completion word to the completion destination: * 0-Do not write a completion word * 1-Write the completion word */ SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1); SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1); if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST)) SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1); /* swapping mode 3 - big endian */ SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY); port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ? p_params->port_id : p_hwfn->port_id; SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id); /* reset source address in next go */ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1); /* reset dest address in next go */ SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1); /* SRC/DST VFID: all 1's - pf, otherwise VF id */ if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) { SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1); SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid); } else { SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF); } if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) { SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1); SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid); } else { SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF); } p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode); p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b); } u32 qed_dmae_idx_to_go_cmd(u8 idx) { /* All the DMAE 'go' registers form an array in internal memory */ return DMAE_REG_GO_C0 + (idx << 2); } static int qed_dmae_post_command(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd; u8 idx_cmd = p_hwfn->dmae_info.channel, i; int qed_status = 0; /* verify address is not NULL */ if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) || ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) { DP_NOTICE(p_hwfn, "source or destination address 0 idx_cmd=%d\n" "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", idx_cmd, le32_to_cpu(p_command->opcode), le16_to_cpu(p_command->opcode_b), le16_to_cpu(p_command->length_dw), le32_to_cpu(p_command->src_addr_hi), le32_to_cpu(p_command->src_addr_lo), le32_to_cpu(p_command->dst_addr_hi), le32_to_cpu(p_command->dst_addr_lo)); return -EINVAL; } DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", idx_cmd, le32_to_cpu(p_command->opcode), le16_to_cpu(p_command->opcode_b), le16_to_cpu(p_command->length_dw), le32_to_cpu(p_command->src_addr_hi), le32_to_cpu(p_command->src_addr_lo), le32_to_cpu(p_command->dst_addr_hi), le32_to_cpu(p_command->dst_addr_lo)); /* Copy the command to DMAE - need to do it before every call * for source/dest address no reset. * The first 9 DWs are the command registers, the 10 DW is the * GO register, and the rest are result registers * (which are read only by the client). */ for (i = 0; i < DMAE_CMD_SIZE; i++) { u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ? 
*(((u32 *)p_command) + i) : 0; qed_wr(p_hwfn, p_ptt, DMAE_REG_CMD_MEM + (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) + (i * sizeof(u32)), data); } qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE); return qed_status; } int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn) { dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr; struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd; u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer; u32 **p_comp = &p_hwfn->dmae_info.p_completion_word; *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(u32), p_addr, GFP_KERNEL); if (!*p_comp) goto err; p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr; *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct dmae_cmd), p_addr, GFP_KERNEL); if (!*p_cmd) goto err; p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr; *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(u32) * DMAE_MAX_RW_SIZE, p_addr, GFP_KERNEL); if (!*p_buff) goto err; p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; return 0; err: qed_dmae_info_free(p_hwfn); return -ENOMEM; } void qed_dmae_info_free(struct qed_hwfn *p_hwfn) { dma_addr_t p_phys; /* Just make sure no one is in the middle */ mutex_lock(&p_hwfn->dmae_info.mutex); if (p_hwfn->dmae_info.p_completion_word) { p_phys = p_hwfn->dmae_info.completion_word_phys_addr; dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(u32), p_hwfn->dmae_info.p_completion_word, p_phys); p_hwfn->dmae_info.p_completion_word = NULL; } if (p_hwfn->dmae_info.p_dmae_cmd) { p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr; dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct dmae_cmd), p_hwfn->dmae_info.p_dmae_cmd, p_phys); p_hwfn->dmae_info.p_dmae_cmd = NULL; } if (p_hwfn->dmae_info.p_intermediate_buffer) { p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(u32) * DMAE_MAX_RW_SIZE, p_hwfn->dmae_info.p_intermediate_buffer, p_phys); p_hwfn->dmae_info.p_intermediate_buffer = NULL; } mutex_unlock(&p_hwfn->dmae_info.mutex); } static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn) { u32 wait_cnt_limit = 10000, wait_cnt = 0; int qed_status = 0; barrier(); while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) { udelay(DMAE_MIN_WAIT_TIME); if (++wait_cnt > wait_cnt_limit) { DP_NOTICE(p_hwfn->cdev, "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n", *p_hwfn->dmae_info.p_completion_word, DMAE_COMPLETION_VAL); qed_status = -EBUSY; break; } /* to sync the completion_word since we are not * using the volatile keyword for p_completion_word */ barrier(); } if (qed_status == 0) *p_hwfn->dmae_info.p_completion_word = 0; return qed_status; } static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 src_addr, u64 dst_addr, u8 src_type, u8 dst_type, u32 length_dw) { dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; int qed_status = 0; switch (src_type) { case QED_DMAE_ADDRESS_GRC: case QED_DMAE_ADDRESS_HOST_PHYS: cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr)); cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr)); break; /* for virtual source addresses we use the intermediate buffer. 
*/ case QED_DMAE_ADDRESS_HOST_VIRT: cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys)); cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys)); memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0], (void *)(uintptr_t)src_addr, length_dw * sizeof(u32)); break; default: return -EINVAL; } switch (dst_type) { case QED_DMAE_ADDRESS_GRC: case QED_DMAE_ADDRESS_HOST_PHYS: cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr)); cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr)); break; /* for virtual source addresses we use the intermediate buffer. */ case QED_DMAE_ADDRESS_HOST_VIRT: cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys)); cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys)); break; default: return -EINVAL; } cmd->length_dw = cpu_to_le16((u16)length_dw); qed_dmae_post_command(p_hwfn, p_ptt); qed_status = qed_dmae_operation_wait(p_hwfn); if (qed_status) { DP_NOTICE(p_hwfn, "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n", src_addr, dst_addr, length_dw); return qed_status; } if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT) memcpy((void *)(uintptr_t)(dst_addr), &p_hwfn->dmae_info.p_intermediate_buffer[0], length_dw * sizeof(u32)); return 0; } static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 src_addr, u64 dst_addr, u8 src_type, u8 dst_type, u32 size_in_dwords, struct qed_dmae_params *p_params) { dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr; u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0; struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; u64 src_addr_split = 0, dst_addr_split = 0; u16 length_limit = DMAE_MAX_RW_SIZE; int qed_status = 0; u32 offset = 0; if (p_hwfn->cdev->recov_in_prog) { DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n", src_addr, src_type, dst_addr, dst_type, size_in_dwords); /* Let the flow complete w/o any error handling */ return 0; } qed_dmae_opcode(p_hwfn, (src_type == QED_DMAE_ADDRESS_GRC), (dst_type == QED_DMAE_ADDRESS_GRC), p_params); cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys)); cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys)); cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL); /* Check if the grc_addr is valid like < MAX_GRC_OFFSET */ cnt_split = size_in_dwords / length_limit; length_mod = size_in_dwords % length_limit; src_addr_split = src_addr; dst_addr_split = dst_addr; for (i = 0; i <= cnt_split; i++) { offset = length_limit * i; if (!QED_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) { if (src_type == QED_DMAE_ADDRESS_GRC) src_addr_split = src_addr + offset; else src_addr_split = src_addr + (offset * 4); } if (dst_type == QED_DMAE_ADDRESS_GRC) dst_addr_split = dst_addr + offset; else dst_addr_split = dst_addr + (offset * 4); length_cur = (cnt_split == i) ? length_mod : length_limit; /* might be zero on last iteration */ if (!length_cur) continue; qed_status = qed_dmae_execute_sub_operation(p_hwfn, p_ptt, src_addr_split, dst_addr_split, src_type, dst_type, length_cur); if (qed_status) { qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL, "qed_dmae_execute_sub_operation Failed with error 0x%x. 
source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n", qed_status, src_addr, dst_addr, length_cur); break; } } return qed_status; } int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 source_addr, u32 grc_addr, u32 size_in_dwords, struct qed_dmae_params *p_params) { u32 grc_addr_in_dw = grc_addr / sizeof(u32); int rc; mutex_lock(&p_hwfn->dmae_info.mutex); rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr, grc_addr_in_dw, QED_DMAE_ADDRESS_HOST_VIRT, QED_DMAE_ADDRESS_GRC, size_in_dwords, p_params); mutex_unlock(&p_hwfn->dmae_info.mutex); return rc; } int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords, struct qed_dmae_params *p_params) { u32 grc_addr_in_dw = grc_addr / sizeof(u32); int rc; mutex_lock(&p_hwfn->dmae_info.mutex); rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, dest_addr, QED_DMAE_ADDRESS_GRC, QED_DMAE_ADDRESS_HOST_VIRT, size_in_dwords, p_params); mutex_unlock(&p_hwfn->dmae_info.mutex); return rc; } int qed_dmae_host2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, dma_addr_t source_addr, dma_addr_t dest_addr, u32 size_in_dwords, struct qed_dmae_params *p_params) { int rc; mutex_lock(&(p_hwfn->dmae_info.mutex)); rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr, dest_addr, QED_DMAE_ADDRESS_HOST_PHYS, QED_DMAE_ADDRESS_HOST_PHYS, size_in_dwords, p_params); mutex_unlock(&(p_hwfn->dmae_info.mutex)); return rc; } void qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_hw_err_type err_type, const char *fmt, ...) { char buf[QED_HW_ERR_MAX_STR_SIZE]; va_list vl; int len; if (fmt) { va_start(vl, fmt); len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl); va_end(vl); if (len > QED_HW_ERR_MAX_STR_SIZE - 1) len = QED_HW_ERR_MAX_STR_SIZE - 1; DP_NOTICE(p_hwfn, "%s", buf); } /* Fan failure cannot be masked by handling of another HW error */ if (p_hwfn->cdev->recov_in_prog && err_type != QED_HW_ERR_FAN_FAIL) { DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "Recovery is in progress. Avoid notifying about HW error %d.\n", err_type); return; } qed_hw_error_occurred(p_hwfn, err_type); if (fmt) qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len); } int qed_dmae_sanity(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, const char *phase) { u32 size = PAGE_SIZE / 2, val; int rc = 0; dma_addr_t p_phys; void *p_virt; u32 *p_tmp; p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, &p_phys, GFP_KERNEL); if (!p_virt) { DP_NOTICE(p_hwfn, "DMAE sanity [%s]: failed to allocate memory\n", phase); return -ENOMEM; } /* Fill the bottom half of the allocated memory with a known pattern */ for (p_tmp = (u32 *)p_virt; p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) { /* Save the address itself as the value */ val = (u32)(uintptr_t)p_tmp; *p_tmp = val; } /* Zero the top half of the allocated memory */ memset((u8 *)p_virt + size, 0, size); DP_VERBOSE(p_hwfn, QED_MSG_SP, "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n", phase, (u64)p_phys, p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size); rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size, size / 4, NULL); if (rc) { DP_NOTICE(p_hwfn, "DMAE sanity [%s]: qed_dmae_host2host() failed. 
rc = %d.\n", phase, rc); goto out; } /* Verify that the top half of the allocated memory has the pattern */ for (p_tmp = (u32 *)((u8 *)p_virt + size); p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) { /* The corresponding address in the bottom half */ val = (u32)(uintptr_t)p_tmp - size; if (*p_tmp != val) { DP_NOTICE(p_hwfn, "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n", phase, (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt), p_tmp, *p_tmp, val); rc = -EINVAL; goto out; } } out: dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys); return rc; }
linux-master
drivers/net/ethernet/qlogic/qed/qed_hw.c
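A small sketch (userspace C with made-up window sizes and addresses, not the driver's types) of the window-hit test inside qed_set_ptt() above: an access can reuse the currently mapped PTT window only if it lies at or above the window base and within the window size; otherwise the window is re-pointed at the new address and the offset restarts at zero.

#include <stdint.h>
#include <stdio.h>

#define WIN_SIZE 0x1000u	/* made-up example window size (4 KiB) */

struct win {
	uint32_t base;		/* stand-in for the per-PTT pxp.offset, in bytes */
};

static uint32_t win_translate(struct win *w, uint32_t hw_addr)
{
	uint32_t off = hw_addr - w->base;

	/* Miss: the address is below the base or past the end of the window. */
	if (hw_addr < w->base || off >= WIN_SIZE) {
		printf("re-pointing window 0x%x -> 0x%x\n", w->base, hw_addr);
		w->base = hw_addr;
		off = 0;
	}
	return off;		/* offset to add to the window's BAR address */
}

int main(void)
{
	struct win w = { .base = 0x1c02000 };

	printf("offset 0x%x\n", win_translate(&w, 0x1c02010));	/* hit */
	printf("offset 0x%x\n", win_translate(&w, 0x1d00000));	/* miss, re-point */
	printf("offset 0x%x\n", win_translate(&w, 0x1d00ffc));	/* hit in new window */
	return 0;
}

Re-pointing only on a miss is what lets back-to-back accesses to nearby registers reuse the same admin-window setting instead of rewriting it every time.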
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include "qed.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_iro_hsi.h" #include "qed_reg_addr.h" #include "qed_sriov.h" #define QED_INIT_MAX_POLL_COUNT 100 #define QED_INIT_POLL_PERIOD_US 500 static u32 pxp_global_win[] = { 0, 0, 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */ 0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */ 0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */ 0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */ 0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */ 0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */ 0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */ 0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */ 0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */ 0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */ 0, 0, 0, 0, }; /* IRO Array */ static const u32 iro_arr[] = { 0x00000000, 0x00000000, 0x00080000, 0x00004478, 0x00000008, 0x00080000, 0x00003288, 0x00000088, 0x00880000, 0x000058a8, 0x00000020, 0x00200000, 0x00003188, 0x00000008, 0x00080000, 0x00000b00, 0x00000008, 0x00040000, 0x00000a80, 0x00000008, 0x00040000, 0x00000000, 0x00000008, 0x00020000, 0x00000080, 0x00000008, 0x00040000, 0x00000084, 0x00000008, 0x00020000, 0x00005798, 0x00000004, 0x00040000, 0x00004e50, 0x00000000, 0x00780000, 0x00003e40, 0x00000000, 0x00780000, 0x00004500, 0x00000000, 0x00780000, 0x00003210, 0x00000000, 0x00780000, 0x00003b50, 0x00000000, 0x00780000, 0x00007f58, 0x00000000, 0x00780000, 0x00005fd8, 0x00000000, 0x00080000, 0x00007100, 0x00000000, 0x00080000, 0x0000af20, 0x00000000, 0x00080000, 0x00004398, 0x00000000, 0x00080000, 0x0000a5a0, 0x00000000, 0x00080000, 0x0000bde8, 0x00000000, 0x00080000, 0x00000020, 0x00000004, 0x00040000, 0x00005688, 0x00000010, 0x00100000, 0x0000c210, 0x00000030, 0x00300000, 0x0000b108, 0x00000038, 0x00380000, 0x00003d20, 0x00000080, 0x00400000, 0x0000bf60, 0x00000000, 0x00040000, 0x00004560, 0x00040080, 0x00040000, 0x000001f8, 0x00000004, 0x00040000, 0x00003d60, 0x00000080, 0x00200000, 0x00008960, 0x00000040, 0x00300000, 0x0000e840, 0x00000060, 0x00600000, 0x00004698, 0x00000080, 0x00380000, 0x000107b8, 0x000000c0, 0x00c00000, 0x000001f8, 0x00000002, 0x00020000, 0x0000a260, 0x00000000, 0x01080000, 0x0000a368, 0x00000008, 0x00080000, 0x000001c0, 0x00000008, 0x00080000, 0x000001f8, 0x00000008, 0x00080000, 0x00000ac0, 0x00000008, 0x00080000, 0x00002578, 0x00000008, 0x00080000, 0x000024f8, 0x00000008, 0x00080000, 0x00000280, 0x00000008, 0x00080000, 0x00000680, 0x00080018, 0x00080000, 0x00000b78, 0x00080018, 0x00020000, 0x0000c600, 0x00000058, 0x003c0000, 0x00012038, 0x00000020, 0x00100000, 0x00011b00, 0x00000048, 0x00180000, 0x00009650, 0x00000050, 0x00200000, 0x00008b10, 0x00000040, 0x00280000, 0x000116c0, 0x00000018, 0x00100000, 0x0000c808, 0x00000048, 0x00380000, 0x00011790, 0x00000020, 0x00200000, 0x000046d0, 0x00000080, 0x00100000, 0x00003618, 0x00000010, 0x00100000, 0x0000a9e8, 0x00000008, 0x00010000, 0x000097a0, 0x00000008, 0x00010000, 0x00011a10, 0x00000008, 0x00010000, 0x0000e9f8, 
0x00000008, 0x00010000, 0x00012648, 0x00000008, 0x00010000, 0x000121c8, 0x00000008, 0x00010000, 0x0000af08, 0x00000030, 0x00100000, 0x0000d748, 0x00000028, 0x00280000, 0x00009e68, 0x00000018, 0x00180000, 0x00009fe8, 0x00000008, 0x00080000, 0x00013ea8, 0x00000008, 0x00080000, 0x00012f18, 0x00000018, 0x00180000, 0x0000dfe8, 0x00500288, 0x00100000, 0x000131a0, 0x00000138, 0x00280000, }; void qed_init_iro_array(struct qed_dev *cdev) { cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET; } void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val) { if (rt_offset >= RUNTIME_ARRAY_SIZE) { DP_ERR(p_hwfn, "Avoid storing %u in rt_data at index %u!\n", val, rt_offset); return; } p_hwfn->rt_data.init_val[rt_offset] = val; p_hwfn->rt_data.b_valid[rt_offset] = true; } void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 *p_val, size_t size) { size_t i; if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) { DP_ERR(p_hwfn, "Avoid storing values in rt_data at indices %u-%u!\n", rt_offset, (u32)(rt_offset + size - 1)); return; } for (i = 0; i < size / sizeof(u32); i++) { p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; p_hwfn->rt_data.b_valid[rt_offset + i] = true; } } static int qed_init_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 addr, u16 rt_offset, u16 size, bool b_must_dmae) { u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset]; bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset]; u16 i, j, segment; int rc = 0; /* Since not all RT entries are initialized, go over the RT and * for each segment of initialized values use DMA. */ for (i = 0; i < size; i++) { if (!p_valid[i]) continue; /* In case there isn't any wide-bus configuration here, * simply write the data instead of using dmae. */ if (!b_must_dmae) { qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]); p_valid[i] = false; continue; } /* Start of a new segment */ for (segment = 1; i + segment < size; segment++) if (!p_valid[i + segment]) break; rc = qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)(p_init_val + i), addr + (i << 2), segment, NULL); if (rc) return rc; /* invalidate after writing */ for (j = i; j < (u32)(i + segment); j++) p_valid[j] = false; /* Jump over the entire segment, including invalid entry */ i += segment; } return rc; } int qed_init_alloc(struct qed_hwfn *p_hwfn) { struct qed_rt_data *rt_data = &p_hwfn->rt_data; if (IS_VF(p_hwfn->cdev)) return 0; rt_data->b_valid = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(bool), GFP_KERNEL); if (!rt_data->b_valid) return -ENOMEM; rt_data->init_val = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(u32), GFP_KERNEL); if (!rt_data->init_val) { kfree(rt_data->b_valid); rt_data->b_valid = NULL; return -ENOMEM; } return 0; } void qed_init_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->rt_data.init_val); p_hwfn->rt_data.init_val = NULL; kfree(p_hwfn->rt_data.b_valid); p_hwfn->rt_data.b_valid = NULL; } static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 addr, u32 dmae_data_offset, u32 size, const u32 *buf, bool b_must_dmae, bool b_can_dmae) { int rc = 0; /* Perform DMAE only for lengthy enough sections or for wide-bus */ if (!b_can_dmae || (!b_must_dmae && (size < 16))) { const u32 *data = buf + dmae_data_offset; u32 i; for (i = 0; i < size; i++) qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]); } else { rc = qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)(buf + dmae_data_offset), addr, size, NULL); } return rc; } static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 addr, u32 fill_count) { static u32 
zero_buffer[DMAE_MAX_RW_SIZE]; struct qed_dmae_params params = {}; memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE); /* invoke the DMAE virtual/physical buffer API with * 1. DMAE init channel * 2. addr, * 3. p_hwfb->temp_data, * 4. fill_count */ SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1); return qed_dmae_host2grc(p_hwfn, p_ptt, (uintptr_t)(&zero_buffer[0]), addr, fill_count, &params); } static void qed_init_fill(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 addr, u32 fill, u32 fill_count) { u32 i; for (i = 0; i < fill_count; i++, addr += sizeof(u32)) qed_wr(p_hwfn, p_ptt, addr, fill); } static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct init_write_op *cmd, bool b_must_dmae, bool b_can_dmae) { u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset); u32 data = le32_to_cpu(cmd->data); u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; u32 offset, output_len, input_len, max_size; struct qed_dev *cdev = p_hwfn->cdev; union init_array_hdr *hdr; const u32 *array_data; int rc = 0; u32 size; array_data = cdev->fw_data->arr_data; hdr = (union init_array_hdr *)(array_data + dmae_array_offset); data = le32_to_cpu(hdr->raw.data); switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) { case INIT_ARR_ZIPPED: offset = dmae_array_offset + 1; input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE); max_size = MAX_ZIPPED_SIZE * 4; memset(p_hwfn->unzip_buf, 0, max_size); output_len = qed_unzip_data(p_hwfn, input_len, (u8 *)&array_data[offset], max_size, (u8 *)p_hwfn->unzip_buf); if (output_len) { rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0, output_len, p_hwfn->unzip_buf, b_must_dmae, b_can_dmae); } else { DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n"); rc = -EINVAL; } break; case INIT_ARR_PATTERN: { u32 repeats = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_REPETITIONS); u32 i; size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE); for (i = 0; i < repeats; i++, addr += size << 2) { rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, dmae_array_offset + 1, size, array_data, b_must_dmae, b_can_dmae); if (rc) break; } break; } case INIT_ARR_STANDARD: size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE); rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, dmae_array_offset + 1, size, array_data, b_must_dmae, b_can_dmae); break; } return rc; } /* init_ops write command */ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct init_write_op *p_cmd, bool b_can_dmae) { u32 data = le32_to_cpu(p_cmd->data); bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS); u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; union init_write_args *arg = &p_cmd->args; int rc = 0; /* Sanitize */ if (b_must_dmae && !b_can_dmae) { DP_NOTICE(p_hwfn, "Need to write to %08x for Wide-bus but DMAE isn't allowed\n", addr); return -EINVAL; } switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) { case INIT_SRC_INLINE: data = le32_to_cpu(p_cmd->args.inline_val); qed_wr(p_hwfn, p_ptt, addr, data); break; case INIT_SRC_ZEROS: data = le32_to_cpu(p_cmd->args.zeros_count); if (b_must_dmae || (b_can_dmae && (data >= 64))) rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data); else qed_init_fill(p_hwfn, p_ptt, addr, 0, data); break; case INIT_SRC_ARRAY: rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd, b_must_dmae, b_can_dmae); break; case INIT_SRC_RUNTIME: qed_init_rt(p_hwfn, p_ptt, addr, le16_to_cpu(arg->runtime.offset), le16_to_cpu(arg->runtime.size), b_must_dmae); break; } return rc; } static inline bool comp_eq(u32 val, u32 expected_val) { return val == 
expected_val; } static inline bool comp_and(u32 val, u32 expected_val) { return (val & expected_val) == expected_val; } static inline bool comp_or(u32 val, u32 expected_val) { return (val | expected_val) > 0; } /* init_ops read/poll commands */ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct init_read_op *cmd) { bool (*comp_check)(u32 val, u32 expected_val); u32 delay = QED_INIT_POLL_PERIOD_US, val; u32 data, addr, poll; int i; data = le32_to_cpu(cmd->op_data); addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); val = qed_rd(p_hwfn, p_ptt, addr); if (poll == INIT_POLL_NONE) return; switch (poll) { case INIT_POLL_EQ: comp_check = comp_eq; break; case INIT_POLL_OR: comp_check = comp_or; break; case INIT_POLL_AND: comp_check = comp_and; break; default: DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", cmd->op_data); return; } data = le32_to_cpu(cmd->expected_val); for (i = 0; i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) { udelay(delay); val = qed_rd(p_hwfn, p_ptt, addr); } if (i == QED_INIT_MAX_POLL_COUNT) { DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", addr, le32_to_cpu(cmd->expected_val), val, le32_to_cpu(cmd->op_data)); } } /* init_ops callbacks entry point */ static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct init_callback_op *p_cmd) { int rc; switch (p_cmd->callback_id) { case DMAE_READY_CB: rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase"); break; default: DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n", p_cmd->callback_id); return -EINVAL; } return rc; } static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn, u16 *p_offset, int modes) { struct qed_dev *cdev = p_hwfn->cdev; const u8 *modes_tree_buf; u8 arg1, arg2, tree_val; modes_tree_buf = cdev->fw_data->modes_tree_buf; tree_val = modes_tree_buf[(*p_offset)++]; switch (tree_val) { case INIT_MODE_OP_NOT: return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1; case INIT_MODE_OP_OR: arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); return arg1 | arg2; case INIT_MODE_OP_AND: arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); return arg1 & arg2; default: tree_val -= MAX_INIT_MODE_OPS; return (modes & BIT(tree_val)) ? 
1 : 0; } } static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, struct init_if_mode_op *p_cmd, int modes) { u16 offset = le16_to_cpu(p_cmd->modes_buf_offset); if (qed_init_cmd_mode_match(p_hwfn, &offset, modes)) return 0; else return GET_FIELD(le32_to_cpu(p_cmd->op_data), INIT_IF_MODE_OP_CMD_OFFSET); } static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd, u32 phase, u32 phase_id) { u32 data = le32_to_cpu(p_cmd->phase_data); u32 op_data = le32_to_cpu(p_cmd->op_data); if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase && (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID || GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id))) return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET); else return 0; } int qed_init_run(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int phase, int phase_id, int modes) { bool b_dmae = (phase != PHASE_ENGINE); struct qed_dev *cdev = p_hwfn->cdev; u32 cmd_num, num_init_ops; union init_op *init_ops; int rc = 0; num_init_ops = cdev->fw_data->init_ops_size; init_ops = cdev->fw_data->init_ops; p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC); if (!p_hwfn->unzip_buf) return -ENOMEM; for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) { union init_op *cmd = &init_ops[cmd_num]; u32 data = le32_to_cpu(cmd->raw.op_data); switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) { case INIT_OP_WRITE: rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write, b_dmae); break; case INIT_OP_READ: qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read); break; case INIT_OP_IF_MODE: cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode, modes); break; case INIT_OP_IF_PHASE: cmd_num += qed_init_cmd_phase(&cmd->if_phase, phase, phase_id); break; case INIT_OP_DELAY: /* qed_init_run is always invoked from * sleep-able context */ udelay(le32_to_cpu(cmd->delay.delay)); break; case INIT_OP_CALLBACK: rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); if (phase == PHASE_ENGINE && cmd->callback.callback_id == DMAE_READY_CB) b_dmae = true; break; } if (rc) break; } kfree(p_hwfn->unzip_buf); p_hwfn->unzip_buf = NULL; return rc; } void qed_gtt_init(struct qed_hwfn *p_hwfn) { u32 gtt_base; u32 i; /* Set the global windows */ gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START; for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++) if (pxp_global_win[i]) REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE, pxp_global_win[i]); } int qed_init_fw_data(struct qed_dev *cdev, const u8 *data) { struct qed_fw_data *fw = cdev->fw_data; struct bin_buffer_hdr *buf_hdr; u32 offset, len; if (!data) { DP_NOTICE(cdev, "Invalid fw data\n"); return -EINVAL; } /* First Dword contains metadata and should be skipped */ buf_hdr = (struct bin_buffer_hdr *)data; offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset; fw->fw_ver_info = (struct fw_ver_info *)(data + offset); offset = buf_hdr[BIN_BUF_INIT_CMD].offset; fw->init_ops = (union init_op *)(data + offset); offset = buf_hdr[BIN_BUF_INIT_VAL].offset; fw->arr_data = (u32 *)(data + offset); offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset; fw->modes_tree_buf = (u8 *)(data + offset); len = buf_hdr[BIN_BUF_INIT_CMD].length; fw->init_ops_size = len / sizeof(struct init_raw_op); offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset; fw->fw_overlays = (u32 *)(data + offset); len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length; fw->fw_overlays_len = len; return 0; }
linux-master
drivers/net/ethernet/qlogic/qed/qed_init_ops.c
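A self-contained sketch (plain C; the opcode values and the example tree are illustrative, not the firmware's actual encoding) of the recursive modes-tree walk in qed_init_cmd_mode_match() above: the tree is a flat byte array in prefix form, the first few byte values are NOT/OR/AND operators, and any other byte is a leaf selecting one bit of the active-modes mask.

#include <stdint.h>
#include <stdio.h>

/* Illustrative opcode values; the real encoding comes from the firmware data. */
enum { OP_NOT = 0, OP_OR = 1, OP_AND = 2, NUM_OPS = 3 };

static uint8_t mode_match(const uint8_t *tree, uint16_t *pos, uint32_t modes)
{
	uint8_t val = tree[(*pos)++];
	uint8_t a, b;

	switch (val) {
	case OP_NOT:
		return mode_match(tree, pos, modes) ^ 1;
	case OP_OR:
		/* Evaluate both sides so *pos always advances past the subtree. */
		a = mode_match(tree, pos, modes);
		b = mode_match(tree, pos, modes);
		return a | b;
	case OP_AND:
		a = mode_match(tree, pos, modes);
		b = mode_match(tree, pos, modes);
		return a & b;
	default:
		/* Leaf: anything past the operators selects one mode bit. */
		val -= NUM_OPS;
		return (modes & (1u << val)) ? 1 : 0;
	}
}

int main(void)
{
	/* Prefix form of (mode0 AND (NOT mode2)); leaves are NUM_OPS + bit index. */
	const uint8_t tree[] = { OP_AND, NUM_OPS + 0, OP_NOT, NUM_OPS + 2 };
	uint16_t pos = 0;

	printf("modes 0x1 -> %u\n", mode_match(tree, &pos, 0x1));	/* 1 */
	pos = 0;
	printf("modes 0x5 -> %u\n", mode_match(tree, &pos, 0x5));	/* 0 */
	return 0;
}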
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <asm/param.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/workqueue.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/spinlock.h> #define __PREVENT_DUMP_MEM_ARR__ #define __PREVENT_PXP_GLOBAL_WIN__ #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_fcoe.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_iro_hsi.h" #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" #include <linux/qed/qed_fcoe_if.h> struct qed_fcoe_conn { struct list_head list_entry; bool free_on_delete; u16 conn_id; u32 icid; u32 fw_cid; u8 layer_code; dma_addr_t sq_pbl_addr; dma_addr_t sq_curr_page_addr; dma_addr_t sq_next_page_addr; dma_addr_t xferq_pbl_addr; void *xferq_pbl_addr_virt_addr; dma_addr_t xferq_addr[4]; void *xferq_addr_virt_addr[4]; dma_addr_t confq_pbl_addr; void *confq_pbl_addr_virt_addr; dma_addr_t confq_addr[2]; void *confq_addr_virt_addr[2]; dma_addr_t terminate_params; u16 dst_mac_addr_lo; u16 dst_mac_addr_mid; u16 dst_mac_addr_hi; u16 src_mac_addr_lo; u16 src_mac_addr_mid; u16 src_mac_addr_hi; u16 tx_max_fc_pay_len; u16 e_d_tov_timer_val; u16 rec_tov_timer_val; u16 rx_max_fc_pay_len; u16 vlan_tag; u16 physical_q0; struct fc_addr_nw s_id; u8 max_conc_seqs_c3; struct fc_addr_nw d_id; u8 flags; u8 def_q_idx; }; static int qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct qed_fcoe_pf_params *fcoe_pf_params = NULL; struct fcoe_init_ramrod_params *p_ramrod = NULL; struct fcoe_init_func_ramrod_data *p_data; struct fcoe_conn_context *p_cxt = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_cxt_info cxt_info; u32 dummy_cid; int rc = 0; __le16 tmp; u8 i; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, FCOE_RAMROD_CMD_ID_INIT_FUNC, PROTOCOLID_FCOE, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.fcoe_init; p_data = &p_ramrod->init_ramrod_data; fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params; /* Sanity */ if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) { DP_ERR(p_hwfn, "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. 
Aborting function start\n", fcoe_pf_params->num_cqs, p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); rc = -EINVAL; goto err; } p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages); p_data->sq_num_pages_in_pbl = tmp; rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); if (rc) goto err; cxt_info.iid = dummy_cid; rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); if (rc) { DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", dummy_cid); goto err; } p_cxt = cxt_info.p_cxt; memset(p_cxt, 0, sizeof(*p_cxt)); SET_FIELD(p_cxt->tstorm_ag_context.flags3, TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); fcoe_pf_params->dummy_icid = (u16)dummy_cid; tmp = cpu_to_le16(fcoe_pf_params->num_tasks); p_data->func_params.num_tasks = tmp; p_data->func_params.log_page_size = fcoe_pf_params->log_page_size; p_data->func_params.debug_mode = fcoe_pf_params->debug_mode; DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr, fcoe_pf_params->glbl_q_params_addr); tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries); p_data->q_params.cq_num_entries = tmp; tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries); p_data->q_params.cmdq_num_entries = tmp; p_data->q_params.num_queues = fcoe_pf_params->num_cqs; tmp = (__force __le16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; p_data->q_params.queue_relative_offset = (__force u8)tmp; for (i = 0; i < fcoe_pf_params->num_cqs; i++) { tmp = cpu_to_le16(qed_get_igu_sb_id(p_hwfn, i)); p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; } p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi; p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi; p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ); DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ], fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]); p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] = fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ]; tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]); p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = tmp; tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]); p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = tmp; DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA], fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]); p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = tmp; tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]); p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = tmp; tmp = cpu_to_le16(fcoe_pf_params->rq_buffer_size); p_data->q_params.rq_buffer_size = tmp; if (fcoe_pf_params->is_target) { SET_FIELD(p_data->q_params.q_validity, SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA]) SET_FIELD(p_data->q_params.q_validity, SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1); SET_FIELD(p_data->q_params.q_validity, SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1); } else { SET_FIELD(p_data->q_params.q_validity, SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); } rc = qed_spq_post(p_hwfn, p_ent, NULL); return rc; err: qed_sp_destroy_request(p_hwfn, p_ent); return rc; } static int qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, struct qed_fcoe_conn *p_conn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL; struct fcoe_conn_offload_ramrod_data *p_data; struct qed_spq_entry *p_ent = NULL; struct 
qed_sp_init_data init_data; u16 physical_q0; __le16 tmp; int rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, PROTOCOLID_FCOE, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.fcoe_conn_ofld; p_data = &p_ramrod->offload_ramrod_data; /* Transmission PQ is the first of the PF */ physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); p_conn->physical_q0 = physical_q0; p_data->physical_q0 = cpu_to_le16(physical_q0); p_data->conn_id = cpu_to_le16(p_conn->conn_id); DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr); DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr); DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr); DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr); DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]); DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]); DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr); DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]); DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]); p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo); p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid); p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi); p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo); p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid); p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi); tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len); p_data->tx_max_fc_pay_len = tmp; tmp = cpu_to_le16(p_conn->e_d_tov_timer_val); p_data->e_d_tov_timer_val = tmp; tmp = cpu_to_le16(p_conn->rec_tov_timer_val); p_data->rec_rr_tov_timer_val = tmp; tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len); p_data->rx_max_fc_pay_len = tmp; p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag); p_data->s_id.addr_hi = p_conn->s_id.addr_hi; p_data->s_id.addr_mid = p_conn->s_id.addr_mid; p_data->s_id.addr_lo = p_conn->s_id.addr_lo; p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3; p_data->d_id.addr_hi = p_conn->d_id.addr_hi; p_data->d_id.addr_mid = p_conn->d_id.addr_mid; p_data->d_id.addr_lo = p_conn->d_id.addr_lo; p_data->flags = p_conn->flags; if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) SET_FIELD(p_data->flags, FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1); p_data->def_q_idx = p_conn->def_q_idx; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn, struct qed_fcoe_conn *p_conn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = 0; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, PROTOCOLID_FCOE, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.fcoe_conn_terminate; DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr, p_conn->terminate_params); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 
enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u32 active_segs = 0; int rc = 0; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, PROTOCOLID_FCOE, &init_data); if (rc) return rc; active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK); active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG); qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn, struct qed_fcoe_conn **p_out_conn) { struct qed_fcoe_conn *p_conn = NULL; void *p_addr; u32 i; spin_lock_bh(&p_hwfn->p_fcoe_info->lock); if (!list_empty(&p_hwfn->p_fcoe_info->free_list)) p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list, struct qed_fcoe_conn, list_entry); if (p_conn) { list_del(&p_conn->list_entry); spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); *p_out_conn = p_conn; return 0; } spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); if (!p_conn) return -ENOMEM; p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, &p_conn->xferq_pbl_addr, GFP_KERNEL); if (!p_addr) goto nomem_pbl_xferq; p_conn->xferq_pbl_addr_virt_addr = p_addr; for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, &p_conn->xferq_addr[i], GFP_KERNEL); if (!p_addr) goto nomem_xferq; p_conn->xferq_addr_virt_addr[i] = p_addr; p_addr = p_conn->xferq_pbl_addr_virt_addr; ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i]; } p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, &p_conn->confq_pbl_addr, GFP_KERNEL); if (!p_addr) goto nomem_xferq; p_conn->confq_pbl_addr_virt_addr = p_addr; for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, &p_conn->confq_addr[i], GFP_KERNEL); if (!p_addr) goto nomem_confq; p_conn->confq_addr_virt_addr[i] = p_addr; p_addr = p_conn->confq_pbl_addr_virt_addr; ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i]; } p_conn->free_on_delete = true; *p_out_conn = p_conn; return 0; nomem_confq: dma_free_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, p_conn->confq_pbl_addr_virt_addr, p_conn->confq_pbl_addr); for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) if (p_conn->confq_addr_virt_addr[i]) dma_free_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, p_conn->confq_addr_virt_addr[i], p_conn->confq_addr[i]); nomem_xferq: dma_free_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, p_conn->xferq_pbl_addr_virt_addr, p_conn->xferq_pbl_addr); for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) if (p_conn->xferq_addr_virt_addr[i]) dma_free_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, p_conn->xferq_addr_virt_addr[i], p_conn->xferq_addr[i]); nomem_pbl_xferq: kfree(p_conn); return -ENOMEM; } static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn, struct qed_fcoe_conn *p_conn) { u32 i; if (!p_conn) return; if (p_conn->confq_pbl_addr_virt_addr) dma_free_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, p_conn->confq_pbl_addr_virt_addr, p_conn->confq_pbl_addr); for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { if 
(!p_conn->confq_addr_virt_addr[i]) continue; dma_free_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, p_conn->confq_addr_virt_addr[i], p_conn->confq_addr[i]); } if (p_conn->xferq_pbl_addr_virt_addr) dma_free_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, p_conn->xferq_pbl_addr_virt_addr, p_conn->xferq_pbl_addr); for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { if (!p_conn->xferq_addr_virt_addr[i]) continue; dma_free_coherent(&p_hwfn->cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, p_conn->xferq_addr_virt_addr[i], p_conn->xferq_addr[i]); } kfree(p_conn); } static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) { return (u8 __iomem *)p_hwfn->doorbells + qed_db_addr(cid, DQ_DEMS_LEGACY); } static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, u8 bdq_id) { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, MSTORM_SCSI_BDQ_EXT_PROD, RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; } } static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, u8 bdq_id) { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, TSTORM_SCSI_BDQ_EXT_PROD, RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; } } int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) { struct qed_fcoe_info *p_fcoe_info; /* Allocate LL2's set struct */ p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL); if (!p_fcoe_info) { DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n"); return -ENOMEM; } INIT_LIST_HEAD(&p_fcoe_info->free_list); p_hwfn->p_fcoe_info = p_fcoe_info; return 0; } void qed_fcoe_setup(struct qed_hwfn *p_hwfn) { struct fcoe_task_context *p_task_ctx = NULL; u32 i, lc; int rc; spin_lock_init(&p_hwfn->p_fcoe_info->lock); for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) { rc = qed_cxt_get_task_ctx(p_hwfn, i, QED_CTX_WORKING_MEM, (void **)&p_task_ctx); if (rc) continue; memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); lc = 0; SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1); p_task_ctx->timer_context.logical_client_0 = cpu_to_le32(lc); lc = 0; SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC1, 1); p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc); SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); } } void qed_fcoe_free(struct qed_hwfn *p_hwfn) { struct qed_fcoe_conn *p_conn = NULL; if (!p_hwfn->p_fcoe_info) return; while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) { p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list, struct qed_fcoe_conn, list_entry); if (!p_conn) break; list_del(&p_conn->list_entry); qed_fcoe_free_connection(p_hwfn, p_conn); } kfree(p_hwfn->p_fcoe_info); p_hwfn->p_fcoe_info = NULL; } static int qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn, struct qed_fcoe_conn *p_in_conn, struct qed_fcoe_conn **p_out_conn) { struct qed_fcoe_conn *p_conn = NULL; int rc = 0; u32 icid; spin_lock_bh(&p_hwfn->p_fcoe_info->lock); rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid); spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); if (rc) return rc; /* Use input connection [if provided] or allocate a new one */ if (p_in_conn) { p_conn = p_in_conn; } else { rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn); if (rc) { spin_lock_bh(&p_hwfn->p_fcoe_info->lock); qed_cxt_release_cid(p_hwfn, icid); spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); return rc; 
} } p_conn->icid = icid; p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; *p_out_conn = p_conn; return rc; } static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn, struct qed_fcoe_conn *p_conn) { spin_lock_bh(&p_hwfn->p_fcoe_info->lock); list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list); qed_cxt_release_cid(p_hwfn, p_conn->icid); spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); } static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_fcoe_stats *p_stats) { struct fcoe_rx_stat tstats; u32 tstats_addr; memset(&tstats, 0, sizeof(tstats)); tstats_addr = BAR0_MAP_REG_TSDM_RAM + TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id); qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt); p_stats->fcoe_rx_data_pkt_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt); p_stats->fcoe_rx_xfer_pkt_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt); p_stats->fcoe_rx_other_pkt_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt); p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt = le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt); p_stats->fcoe_silent_drop_pkt_rq_full_cnt = le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt); p_stats->fcoe_silent_drop_pkt_crc_error_cnt = le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt); p_stats->fcoe_silent_drop_pkt_task_invalid_cnt = le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt); p_stats->fcoe_silent_drop_total_pkt_cnt = le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt); } static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_fcoe_stats *p_stats) { struct fcoe_tx_stat pstats; u32 pstats_addr; memset(&pstats, 0, sizeof(pstats)); pstats_addr = BAR0_MAP_REG_PSDM_RAM + PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id); qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt); p_stats->fcoe_tx_data_pkt_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt); p_stats->fcoe_tx_xfer_pkt_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt); p_stats->fcoe_tx_other_pkt_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt); } static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn, struct qed_fcoe_stats *p_stats, bool is_atomic) { struct qed_ptt *p_ptt; memset(p_stats, 0, sizeof(*p_stats)); p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic); if (!p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); return -EINVAL; } _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats); _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats); qed_ptt_release(p_hwfn, p_ptt); return 0; } struct qed_hash_fcoe_con { struct hlist_node node; struct qed_fcoe_conn *con; }; static int qed_fill_fcoe_dev_info(struct qed_dev *cdev, struct qed_dev_fcoe_info *info) { struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev); int rc; memset(info, 0, sizeof(*info)); rc = qed_fill_dev_info(cdev, &info->common); info->primary_dbq_rq_addr = qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ); info->secondary_bdq_rq_addr = qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); info->wwpn = hwfn->mcp_info->func_info.wwn_port; info->wwnn = hwfn->mcp_info->func_info.wwn_node; info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ); return rc; } static void qed_register_fcoe_ops(struct qed_dev *cdev, struct qed_fcoe_cb_ops *ops, void *cookie) { cdev->protocol_ops.fcoe = ops; cdev->ops_cookie = cookie; } static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev, u32 handle) { struct 
qed_hash_fcoe_con *hash_con = NULL; if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) return NULL; hash_for_each_possible(cdev->connections, hash_con, node, handle) { if (hash_con->con->icid == handle) break; } if (!hash_con || (hash_con->con->icid != handle)) return NULL; return hash_con; } static int qed_fcoe_stop(struct qed_dev *cdev) { struct qed_ptt *p_ptt; int rc; if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { DP_NOTICE(cdev, "fcoe already stopped\n"); return 0; } if (!hash_empty(cdev->connections)) { DP_NOTICE(cdev, "Can't stop fcoe - not all connections were returned\n"); return -EINVAL; } p_ptt = qed_ptt_acquire(QED_AFFIN_HWFN(cdev)); if (!p_ptt) return -EAGAIN; /* Stop the fcoe */ rc = qed_sp_fcoe_func_stop(QED_AFFIN_HWFN(cdev), p_ptt, QED_SPQ_MODE_EBLOCK, NULL); cdev->flags &= ~QED_FLAG_STORAGE_STARTED; qed_ptt_release(QED_AFFIN_HWFN(cdev), p_ptt); return rc; } static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks) { int rc; if (cdev->flags & QED_FLAG_STORAGE_STARTED) { DP_NOTICE(cdev, "fcoe already started;\n"); return 0; } rc = qed_sp_fcoe_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_NOTICE(cdev, "Failed to start fcoe\n"); return rc; } cdev->flags |= QED_FLAG_STORAGE_STARTED; hash_init(cdev->connections); if (tasks) { struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info), GFP_ATOMIC); if (!tid_info) { DP_NOTICE(cdev, "Failed to allocate tasks information\n"); qed_fcoe_stop(cdev); return -ENOMEM; } rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info); if (rc) { DP_NOTICE(cdev, "Failed to gather task information\n"); qed_fcoe_stop(cdev); kfree(tid_info); return rc; } /* Fill task information */ tasks->size = tid_info->tid_size; tasks->num_tids_per_block = tid_info->num_tids_per_block; memcpy(tasks->blocks, tid_info->blocks, MAX_TID_BLOCKS_FCOE * sizeof(u8 *)); kfree(tid_info); } return 0; } static int qed_fcoe_acquire_conn(struct qed_dev *cdev, u32 *handle, u32 *fw_cid, void __iomem **p_doorbell) { struct qed_hash_fcoe_con *hash_con; int rc; /* Allocate a hashed connection */ hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL); if (!hash_con) { DP_NOTICE(cdev, "Failed to allocate hashed connection\n"); return -ENOMEM; } /* Acquire the connection */ rc = qed_fcoe_acquire_connection(QED_AFFIN_HWFN(cdev), NULL, &hash_con->con); if (rc) { DP_NOTICE(cdev, "Failed to acquire Connection\n"); kfree(hash_con); return rc; } /* Added the connection to hash table */ *handle = hash_con->con->icid; *fw_cid = hash_con->con->fw_cid; hash_add(cdev->connections, &hash_con->node, *handle); if (p_doorbell) *p_doorbell = qed_fcoe_get_db_addr(QED_AFFIN_HWFN(cdev), *handle); return 0; } static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle) { struct qed_hash_fcoe_con *hash_con; hash_con = qed_fcoe_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } hlist_del(&hash_con->node); qed_fcoe_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con); kfree(hash_con); return 0; } static int qed_fcoe_offload_conn(struct qed_dev *cdev, u32 handle, struct qed_fcoe_params_offload *conn_info) { struct qed_hash_fcoe_con *hash_con; struct qed_fcoe_conn *con; hash_con = qed_fcoe_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } /* Update the connection with information from the params */ con = hash_con->con; con->sq_pbl_addr = conn_info->sq_pbl_addr; con->sq_curr_page_addr = 
conn_info->sq_curr_page_addr; con->sq_next_page_addr = conn_info->sq_next_page_addr; con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len; con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val; con->rec_tov_timer_val = conn_info->rec_tov_timer_val; con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len; con->vlan_tag = conn_info->vlan_tag; con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3; con->flags = conn_info->flags; con->def_q_idx = conn_info->def_q_idx; con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) | conn_info->src_mac[4]; con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) | conn_info->src_mac[2]; con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) | conn_info->src_mac[0]; con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) | conn_info->dst_mac[4]; con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) | conn_info->dst_mac[2]; con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) | conn_info->dst_mac[0]; con->s_id.addr_hi = conn_info->s_id.addr_hi; con->s_id.addr_mid = conn_info->s_id.addr_mid; con->s_id.addr_lo = conn_info->s_id.addr_lo; con->d_id.addr_hi = conn_info->d_id.addr_hi; con->d_id.addr_mid = conn_info->d_id.addr_mid; con->d_id.addr_lo = conn_info->d_id.addr_lo; return qed_sp_fcoe_conn_offload(QED_AFFIN_HWFN(cdev), con, QED_SPQ_MODE_EBLOCK, NULL); } static int qed_fcoe_destroy_conn(struct qed_dev *cdev, u32 handle, dma_addr_t terminate_params) { struct qed_hash_fcoe_con *hash_con; struct qed_fcoe_conn *con; hash_con = qed_fcoe_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } /* Update the connection with information from the params */ con = hash_con->con; con->terminate_params = terminate_params; return qed_sp_fcoe_conn_destroy(QED_AFFIN_HWFN(cdev), con, QED_SPQ_MODE_EBLOCK, NULL); } static int qed_fcoe_stats_context(struct qed_dev *cdev, struct qed_fcoe_stats *stats, bool is_atomic) { return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); } static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats) { return qed_fcoe_stats_context(cdev, stats, false); } void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, struct qed_mcp_fcoe_stats *stats, bool is_atomic) { struct qed_fcoe_stats proto_stats; /* Retrieve FW statistics */ memset(&proto_stats, 0, sizeof(proto_stats)); if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Failed to collect FCoE statistics\n"); return; } /* Translate FW statistics into struct */ stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt + proto_stats.fcoe_rx_xfer_pkt_cnt + proto_stats.fcoe_rx_other_pkt_cnt; stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt + proto_stats.fcoe_tx_xfer_pkt_cnt + proto_stats.fcoe_tx_other_pkt_cnt; stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt; /* Request protocol driver to fill-in the rest */ if (cdev->protocol_ops.fcoe && cdev->ops_cookie) { struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe; void *cookie = cdev->ops_cookie; if (ops->get_login_failures) stats->login_failure = ops->get_login_failures(cookie); } } static const struct qed_fcoe_ops qed_fcoe_ops_pass = { .common = &qed_common_ops_pass, .ll2 = &qed_ll2_ops_pass, .fill_dev_info = &qed_fill_fcoe_dev_info, .start = &qed_fcoe_start, .stop = &qed_fcoe_stop, .register_ops = &qed_register_fcoe_ops, .acquire_conn = &qed_fcoe_acquire_conn, .release_conn = &qed_fcoe_release_conn, .offload_conn = &qed_fcoe_offload_conn, .destroy_conn = &qed_fcoe_destroy_conn, .get_stats = &qed_fcoe_stats, }; 
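/* The ops table above is what this file exposes to an FCoE offload driver:
 * it is handed out by qed_get_fcoe_ops() below, while qed_put_fcoe_ops() is
 * an empty stub kept so callers have a matching "put" for the "get".
 */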
const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);
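/*
 * Consumer-side sketch (not part of the kernel file above): one plausible way
 * an FCoE offload driver could pick up the ops table exported by
 * qed_get_fcoe_ops() and drive the entry points defined in this file.
 * example_fcoe_attach(), and the cdev / cb_ops / cookie arguments it takes,
 * are hypothetical; everything reached through "ops" (fill_dev_info,
 * register_ops, start) and qed_put_fcoe_ops() comes from the code above.
 */
#include <linux/errno.h>
#include <linux/qed/qed_fcoe_if.h>

static int example_fcoe_attach(struct qed_dev *cdev,
			       struct qed_fcoe_cb_ops *cb_ops, void *cookie)
{
	const struct qed_fcoe_ops *ops = qed_get_fcoe_ops();
	struct qed_dev_fcoe_info info;
	int rc;

	if (!ops)
		return -ENODEV;

	/* WWPN/WWNN, BDQ producer addresses, number of CQs, etc. */
	rc = ops->fill_dev_info(cdev, &info);
	if (rc)
		goto out_put;

	/* Let qed call back into the protocol driver (login failures etc.). */
	ops->register_ops(cdev, cb_ops, cookie);

	/* Start the FCoE PF function; a NULL tid pointer skips TID info. */
	rc = ops->start(cdev, NULL);

out_put:
	if (rc)
		qed_put_fcoe_ops();
	return rc;
}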
linux-master
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/vmalloc.h> #include <linux/crash_dump.h> #include <linux/qed/qed_iov_if.h> #include "qed_cxt.h" #include "qed_hsi.h" #include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" #include "qed_vf.h" static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); static u16 qed_vf_from_entity_id(__le16 entity_id) { return le16_to_cpu(entity_id) - MAX_NUM_PFS; } static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) { u8 legacy = 0; if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == ETH_HSI_VER_NO_PKT_LEN_TUNN) legacy |= QED_QCID_LEGACY_VF_RX_PROD; if (!(p_vf->acquire.vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)) legacy |= QED_QCID_LEGACY_VF_CID; return legacy; } /* IOV ramrods */ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { struct vf_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; u8 fp_minor; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_vf->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_VF_START, PROTOCOLID_COMMON, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.vf_start; p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); switch (p_hwfn->hw_info.personality) { case QED_PCI_ETH: p_ramrod->personality = PERSONALITY_ETH; break; case QED_PCI_ETH_ROCE: case QED_PCI_ETH_IWARP: p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; break; default: DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", p_hwfn->hw_info.personality); qed_sp_destroy_request(p_hwfn, p_ent); return -EINVAL; } fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; if (fp_minor > ETH_HSI_VER_MINOR && fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); fp_minor = ETH_HSI_VER_MINOR; } p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] - Starting using HSI %02x.%02x\n", p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, u32 concrete_vfid, u16 opaque_vfid) { struct vf_stop_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = opaque_vfid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_VF_STOP, PROTOCOLID_COMMON, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.vf_stop; p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); return qed_spq_post(p_hwfn, p_ent, NULL); } bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, int 
rel_vf_id, bool b_enabled_only, bool b_non_malicious) { if (!p_hwfn->pf_iov_info) { DP_NOTICE(p_hwfn->cdev, "No iov info\n"); return false; } if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || (rel_vf_id < 0)) return false; if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && b_enabled_only) return false; if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && b_non_malicious) return false; return true; } static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, u16 relative_vf_id, bool b_enabled_only) { struct qed_vf_info *vf = NULL; if (!p_hwfn->pf_iov_info) { DP_NOTICE(p_hwfn->cdev, "No iov info\n"); return NULL; } if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only, false)) vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; else DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n", __func__, relative_vf_id); return vf; } static struct qed_queue_cid * qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue) { int i; for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx) return p_queue->cids[i].p_cid; } return NULL; } enum qed_iov_validate_q_mode { QED_IOV_VALIDATE_Q_NA, QED_IOV_VALIDATE_Q_ENABLE, QED_IOV_VALIDATE_Q_DISABLE, }; static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, u16 qid, enum qed_iov_validate_q_mode mode, bool b_is_tx) { int i; if (mode == QED_IOV_VALIDATE_Q_NA) return true; for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { struct qed_vf_queue_cid *p_qcid; p_qcid = &p_vf->vf_queues[qid].cids[i]; if (!p_qcid->p_cid) continue; if (p_qcid->b_is_tx != b_is_tx) continue; return mode == QED_IOV_VALIDATE_Q_ENABLE; } /* In case we haven't found any valid cid, then its disabled */ return mode == QED_IOV_VALIDATE_Q_DISABLE; } static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, u16 rx_qid, enum qed_iov_validate_q_mode mode) { if (rx_qid >= p_vf->num_rxqs) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); return false; } return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false); } static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, u16 tx_qid, enum qed_iov_validate_q_mode mode) { if (tx_qid >= p_vf->num_txqs) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); return false; } return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true); } static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, u16 sb_idx) { int i; for (i = 0; i < p_vf->num_sbs; i++) if (p_vf->igu_sbs[i] == sb_idx) return true; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); return false; } static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { u8 i; for (i = 0; i < p_vf->num_rxqs; i++) if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, QED_IOV_VALIDATE_Q_ENABLE, false)) return true; return false; } static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { u8 i; for (i = 0; i < p_vf->num_txqs; i++) if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, QED_IOV_VALIDATE_Q_ENABLE, true)) return true; return false; } static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, int vfid, struct qed_ptt *p_ptt) { struct 
qed_bulletin_content *p_bulletin; int crc_size = sizeof(p_bulletin->crc); struct qed_dmae_params params; struct qed_vf_info *p_vf; p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf) return -EINVAL; if (!p_vf->vf_bulletin) return -EINVAL; p_bulletin = p_vf->bulletin.p_virt; /* Increment bulletin board version and compute crc */ p_bulletin->version++; p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, p_vf->bulletin.size - crc_size); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); /* propagate bulletin board via dmae to vm memory */ memset(&params, 0, sizeof(params)); SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); params.dst_vfid = p_vf->abs_vf_id; return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, p_vf->vf_bulletin, p_vf->bulletin.size / 4, &params); } static int qed_iov_pci_cfg_info(struct qed_dev *cdev) { struct qed_hw_sriov_info *iov = cdev->p_iov_info; int pos = iov->pos; DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); if (iov->num_vfs) { DP_VERBOSE(cdev, QED_MSG_IOV, "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n"); iov->num_vfs = 0; } pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); DP_VERBOSE(cdev, QED_MSG_IOV, "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", iov->nres, iov->cap, iov->ctrl, iov->total_vfs, iov->initial_vfs, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); /* Some sanity checks */ if (iov->num_vfs > NUM_OF_VFS(cdev) || iov->total_vfs > NUM_OF_VFS(cdev)) { /* This can happen only due to a bug. 
In this case we set * num_vfs to zero to avoid memory corruption in the code that * assumes max number of vfs */ DP_NOTICE(cdev, "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", iov->num_vfs); iov->num_vfs = 0; iov->total_vfs = 0; } return 0; } static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; struct qed_bulletin_content *p_bulletin_virt; dma_addr_t req_p, rply_p, bulletin_p; union pfvf_tlvs *p_reply_virt_addr; union vfpf_tlvs *p_req_virt_addr; u8 idx = 0; memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; req_p = p_iov_info->mbx_msg_phys_addr; p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; rply_p = p_iov_info->mbx_reply_phys_addr; p_bulletin_virt = p_iov_info->p_bulletins; bulletin_p = p_iov_info->bulletins_phys; if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { DP_ERR(p_hwfn, "%s called without allocating mem first\n", __func__); return; } for (idx = 0; idx < p_iov->total_vfs; idx++) { struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; u32 concrete; vf->vf_mbx.req_virt = p_req_virt_addr + idx; vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); vf->state = VF_STOPPED; vf->b_init = false; vf->bulletin.phys = idx * sizeof(struct qed_bulletin_content) + bulletin_p; vf->bulletin.p_virt = p_bulletin_virt + idx; vf->bulletin.size = sizeof(struct qed_bulletin_content); vf->relative_vf_id = idx; vf->abs_vf_id = idx + p_iov->first_vf_in_pf; concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); vf->concrete_fid = concrete; vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | (vf->abs_vf_id << 8); vf->vport_id = idx + 1; vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; } } static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) { struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; void **p_v_addr; u16 num_vfs = 0; num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "%s for %d VFs\n", __func__, num_vfs); /* Allocate PF Mailbox buffer (per-VF) */ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; p_v_addr = &p_iov_info->mbx_msg_virt_addr; *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, p_iov_info->mbx_msg_size, &p_iov_info->mbx_msg_phys_addr, GFP_KERNEL); if (!*p_v_addr) return -ENOMEM; /* Allocate PF Mailbox Reply buffer (per-VF) */ p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; p_v_addr = &p_iov_info->mbx_reply_virt_addr; *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, p_iov_info->mbx_reply_size, &p_iov_info->mbx_reply_phys_addr, GFP_KERNEL); if (!*p_v_addr) return -ENOMEM; p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * num_vfs; p_v_addr = &p_iov_info->p_bulletins; *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, p_iov_info->bulletins_size, &p_iov_info->bulletins_phys, GFP_KERNEL); if (!*p_v_addr) return -ENOMEM; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", p_iov_info->mbx_msg_virt_addr, (u64)p_iov_info->mbx_msg_phys_addr, p_iov_info->mbx_reply_virt_addr, (u64)p_iov_info->mbx_reply_phys_addr, p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys); return 0; } static void 
qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) { struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) dma_free_coherent(&p_hwfn->cdev->pdev->dev, p_iov_info->mbx_msg_size, p_iov_info->mbx_msg_virt_addr, p_iov_info->mbx_msg_phys_addr); if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) dma_free_coherent(&p_hwfn->cdev->pdev->dev, p_iov_info->mbx_reply_size, p_iov_info->mbx_reply_virt_addr, p_iov_info->mbx_reply_phys_addr); if (p_iov_info->p_bulletins) dma_free_coherent(&p_hwfn->cdev->pdev->dev, p_iov_info->bulletins_size, p_iov_info->p_bulletins, p_iov_info->bulletins_phys); } int qed_iov_alloc(struct qed_hwfn *p_hwfn) { struct qed_pf_iov *p_sriov; if (!IS_PF_SRIOV(p_hwfn)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No SR-IOV - no need for IOV db\n"); return 0; } p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL); if (!p_sriov) return -ENOMEM; p_hwfn->pf_iov_info = p_sriov; qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, qed_sriov_eqe_event); return qed_iov_allocate_vfdb(p_hwfn); } void qed_iov_setup(struct qed_hwfn *p_hwfn) { if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) return; qed_iov_setup_vfdb(p_hwfn); } void qed_iov_free(struct qed_hwfn *p_hwfn) { qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); if (IS_PF_SRIOV_ALLOC(p_hwfn)) { qed_iov_free_vfdb(p_hwfn); kfree(p_hwfn->pf_iov_info); } } void qed_iov_free_hw_info(struct qed_dev *cdev) { kfree(cdev->p_iov_info); cdev->p_iov_info = NULL; } int qed_iov_hw_info(struct qed_hwfn *p_hwfn) { struct qed_dev *cdev = p_hwfn->cdev; int pos; int rc; if (is_kdump_kernel()) return 0; if (IS_VF(p_hwfn->cdev)) return 0; /* Learn the PCI configuration */ pos = pci_find_ext_capability(p_hwfn->cdev->pdev, PCI_EXT_CAP_ID_SRIOV); if (!pos) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n"); return 0; } /* Allocate a new struct for IOV information */ cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); if (!cdev->p_iov_info) return -ENOMEM; cdev->p_iov_info->pos = pos; rc = qed_iov_pci_cfg_info(cdev); if (rc) return rc; /* We want PF IOV to be synonemous with the existence of p_iov_info; * In case the capability is published but there are no VFs, simply * de-allocate the struct. */ if (!cdev->p_iov_info->total_vfs) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "IOV capabilities, but no VFs are published\n"); kfree(cdev->p_iov_info); cdev->p_iov_info = NULL; return 0; } /* First VF index based on offset is tricky: * - If ARI is supported [likely], offset - (16 - pf_id) would * provide the number for eng0. 2nd engine Vfs would begin * after the first engine's VFs. * - If !ARI, VFs would start on next device. * so offset - (256 - pf_id) would provide the number. * Utilize the fact that (256 - pf_id) is achieved only by later * to differentiate between the two. 
*/ if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { u32 first = p_hwfn->cdev->p_iov_info->offset + p_hwfn->abs_pf_id - 16; cdev->p_iov_info->first_vf_in_pf = first; if (QED_PATH_ID(p_hwfn)) cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; } else { u32 first = p_hwfn->cdev->p_iov_info->offset + p_hwfn->abs_pf_id - 256; cdev->p_iov_info->first_vf_in_pf = first; } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "First VF in hwfn 0x%08x\n", cdev->p_iov_info->first_vf_in_pf); return 0; } static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid, bool b_fail_malicious) { /* Check PF supports sriov */ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn)) return false; /* Check VF validity */ if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious)) return false; return true; } static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) { return _qed_iov_pf_sanity_check(p_hwfn, vfid, true); } static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, u16 rel_vf_id, u8 to_disable) { struct qed_vf_info *vf; int i; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); if (!vf) continue; vf->to_disable = to_disable; } } static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) { u16 i; if (!IS_QED_SRIOV(cdev)) return; for (i = 0; i < cdev->p_iov_info->total_vfs; i++) qed_iov_set_vf_to_disable(cdev, i, to_disable); } static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 abs_vfid) { qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, 1 << (abs_vfid & 0x1f)); } static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { int i; /* Set VF masks and configuration - pretend */ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); /* unpretend */ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); /* iterate over all queues, clear sb consumer */ for (i = 0; i < vf->num_sbs; i++) qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, vf->igu_sbs[i], vf->opaque_fid, true); } static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, bool enable) { u32 igu_vf_conf; qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); if (enable) igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; else igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); /* unpretend */ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); } static int qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs) { u8 current_max = 0; int i; /* For AH onward, configuration is per-PF. Find maximum of all * the currently enabled child VFs, and set the number to be that. 
*/ if (!QED_IS_BB(p_hwfn->cdev)) { qed_for_each_vf(p_hwfn, i) { struct qed_vf_info *p_vf; p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true); if (!p_vf) continue; current_max = max_t(u8, current_max, p_vf->num_sbs); } } if (num_sbs > current_max) return qed_mcp_config_vf_msix(p_hwfn, p_ptt, abs_vf_id, num_sbs); return 0; } static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; int rc; /* It's possible VF was previously considered malicious - * clear the indication even if we're only going to disable VF. */ vf->b_malicious = false; if (vf->to_disable) return 0; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); if (rc) return rc; qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, p_hwfn->hw_info.hw_mode); /* unpretend */ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); vf->state = VF_FREE; return rc; } /** * qed_iov_config_perm_table() - Configure the permission zone table. * * @p_hwfn: HW device data. * @p_ptt: PTT window for writing the registers. * @vf: VF info data. * @enable: The actual permission for this VF. * * In E4, queue zone permission table size is 320x9. There * are 320 VF queues for single engine device (256 for dual * engine device), and each entry has the following format: * {Valid, VF[7:0]} */ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, u8 enable) { u32 reg_addr, val; u16 qzone_id = 0; int qid; for (qid = 0; qid < vf->num_rxqs; qid++) { qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, &qzone_id); reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; val = enable ? 
(vf->abs_vf_id | BIT(8)) : 0; qed_wr(p_hwfn, p_ptt, reg_addr, val); } } static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { /* Reset vf in IGU - interrupts are still disabled */ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); /* Permission Table */ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true); } static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, u16 num_rx_queues) { struct qed_igu_block *p_block; struct cau_sb_entry sb_entry; int qid = 0; u32 val = 0; if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); for (qid = 0; qid < num_rx_queues; qid++) { p_block = qed_get_igu_free_sb(p_hwfn, false); vf->igu_sbs[qid] = p_block->igu_sb_id; p_block->status &= ~QED_IGU_STATUS_FREE; SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); qed_wr(p_hwfn, p_ptt, IGU_REG_MAPPING_MEMORY + sizeof(u32) * p_block->igu_sb_id, val); /* Configure igu sb in CAU which were marked valid */ qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, vf->abs_vf_id, 1); qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, CAU_REG_SB_VAR_MEMORY + p_block->igu_sb_id * sizeof(u64), 2, NULL); } vf->num_sbs = (u8)num_rx_queues; return vf->num_sbs; } static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; int idx, igu_id; u32 addr, val; /* Invalidate igu CAM lines and mark them as free */ for (idx = 0; idx < vf->num_sbs; idx++) { igu_id = vf->igu_sbs[idx]; addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; val = qed_rd(p_hwfn, p_ptt, addr); SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); qed_wr(p_hwfn, p_ptt, addr, val); p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; } vf->num_sbs = 0; } static void qed_iov_set_link(struct qed_hwfn *p_hwfn, u16 vfid, struct qed_mcp_link_params *params, struct qed_mcp_link_state *link, struct qed_mcp_link_capabilities *p_caps) { struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, vfid, false); struct qed_bulletin_content *p_bulletin; if (!p_vf) return; p_bulletin = p_vf->bulletin.p_virt; p_bulletin->req_autoneg = params->speed.autoneg; p_bulletin->req_adv_speed = params->speed.advertised_speeds; p_bulletin->req_forced_speed = params->speed.forced_speed; p_bulletin->req_autoneg_pause = params->pause.autoneg; p_bulletin->req_forced_rx = params->pause.forced_rx; p_bulletin->req_forced_tx = params->pause.forced_tx; p_bulletin->req_loopback = params->loopback_mode; p_bulletin->link_up = link->link_up; p_bulletin->speed = link->speed; p_bulletin->full_duplex = link->full_duplex; p_bulletin->autoneg = link->an; p_bulletin->autoneg_complete = link->an_complete; p_bulletin->parallel_detection = link->parallel_detection; p_bulletin->pfc_enabled = link->pfc_enabled; p_bulletin->partner_adv_speed = link->partner_adv_speed; p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; p_bulletin->partner_adv_pause = link->partner_adv_pause; p_bulletin->sfp_tx_fault = link->sfp_tx_fault; 
p_bulletin->capability_speed = p_caps->speed_capabilities; } static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_iov_vf_init_params *p_params) { struct qed_mcp_link_capabilities link_caps; struct qed_mcp_link_params link_params; struct qed_mcp_link_state link_state; u8 num_of_vf_avaiable_chains = 0; struct qed_vf_info *vf = NULL; u16 qid, num_irqs; int rc = 0; u32 cids; u8 i; vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); if (!vf) { DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); return -EINVAL; } if (vf->b_init) { DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", p_params->rel_vf_id); return -EINVAL; } /* Perform sanity checking on the requested queue_id */ for (i = 0; i < p_params->num_queues; i++) { u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE); u16 max_vf_qzone = min_vf_qzone + FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1; qid = p_params->req_rx_queue[i]; if (qid < min_vf_qzone || qid > max_vf_qzone) { DP_NOTICE(p_hwfn, "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n", qid, p_params->rel_vf_id, min_vf_qzone, max_vf_qzone); return -EINVAL; } qid = p_params->req_tx_queue[i]; if (qid > max_vf_qzone) { DP_NOTICE(p_hwfn, "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n", qid, p_params->rel_vf_id, max_vf_qzone); return -EINVAL; } /* If client *really* wants, Tx qid can be shared with PF */ if (qid < min_vf_qzone) DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n", p_params->rel_vf_id, qid, i); } /* Limit number of queues according to number of CIDs */ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", vf->relative_vf_id, p_params->num_queues, (u16)cids); num_irqs = min_t(u16, p_params->num_queues, ((u16)cids)); num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, p_ptt, vf, num_irqs); if (!num_of_vf_avaiable_chains) { DP_ERR(p_hwfn, "no available igu sbs\n"); return -ENOMEM; } /* Choose queue number and index ranges */ vf->num_rxqs = num_of_vf_avaiable_chains; vf->num_txqs = num_of_vf_avaiable_chains; for (i = 0; i < vf->num_rxqs; i++) { struct qed_vf_queue *p_queue = &vf->vf_queues[i]; p_queue->fw_rx_qid = p_params->req_rx_queue[i]; p_queue->fw_tx_qid = p_params->req_tx_queue[i]; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n", vf->relative_vf_id, i, vf->igu_sbs[i], p_queue->fw_rx_qid, p_queue->fw_tx_qid); } /* Update the link configuration in bulletin */ memcpy(&link_params, qed_mcp_get_link_params(p_hwfn), sizeof(link_params)); memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state)); memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(link_caps)); qed_iov_set_link(p_hwfn, p_params->rel_vf_id, &link_params, &link_state, &link_caps); rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); if (!rc) { vf->b_init = true; if (IS_LEAD_HWFN(p_hwfn)) p_hwfn->cdev->p_iov_info->num_vfs++; } return rc; } static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 rel_vf_id) { struct qed_mcp_link_capabilities caps; struct qed_mcp_link_params params; struct qed_mcp_link_state link; struct qed_vf_info *vf = NULL; vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); if (!vf) { DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); return -EINVAL; } if (vf->bulletin.p_virt) memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); 
/* Get the link configuration back in bulletin so * that when VFs are re-enabled they get the actual * link configuration. */ memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params)); memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps); /* Forget the VF's acquisition message */ memset(&vf->acquire, 0, sizeof(vf->acquire)); /* disablng interrupts and resetting permission table was done during * vf-close, however, we could get here without going through vf_close */ /* Disable Interrupts for VF */ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); /* Reset Permission table */ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); vf->num_rxqs = 0; vf->num_txqs = 0; qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); if (vf->b_init) { vf->b_init = false; if (IS_LEAD_HWFN(p_hwfn)) p_hwfn->cdev->p_iov_info->num_vfs--; } return 0; } static bool qed_iov_tlv_supported(u16 tlvtype) { return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; } /* place a given tlv on the tlv buffer, continuing current tlv list */ void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) { struct channel_tlv *tl = (struct channel_tlv *)*offset; tl->type = type; tl->length = length; /* Offset should keep pointing to next TLV (the end of the last) */ *offset += length; /* Return a pointer to the start of the added tlv */ return *offset - length; } /* list the types and lengths of the tlvs on the buffer */ void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) { u16 i = 1, total_length = 0; struct channel_tlv *tlv; do { tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); /* output tlv */ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "TLV number %d: type %d, length %d\n", i, tlv->type, tlv->length); if (tlv->type == CHANNEL_TLV_LIST_END) return; /* Validate entry - protect against malicious VFs */ if (!tlv->length) { DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); return; } total_length += tlv->length; if (total_length >= sizeof(struct tlv_buffer_size)) { DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n"); return; } i++; } while (1); } static void qed_iov_send_response(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf, u16 length, u8 status) { struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; struct qed_dmae_params params; u8 eng_vf_id; mbx->reply_virt->default_resp.hdr.status = status; qed_dp_tlv_list(p_hwfn, mbx->reply_virt); eng_vf_id = p_vf->abs_vf_id; memset(&params, 0, sizeof(params)); SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); params.dst_vfid = eng_vf_id; qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), mbx->req_virt->first_tlv.reply_address + sizeof(u64), (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, &params); /* Once PF copies the rc to the VF, the latter can continue * and send an additional message. So we have to make sure the * channel would be re-set to ready prior to that. 
*/ REG_WR(p_hwfn, GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1); qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, mbx->req_virt->first_tlv.reply_address, sizeof(u64) / 4, &params); } static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, enum qed_iov_vport_update_flag flag) { switch (flag) { case QED_IOV_VP_UPDATE_ACTIVATE: return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; case QED_IOV_VP_UPDATE_VLAN_STRIP: return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; case QED_IOV_VP_UPDATE_TX_SWITCH: return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; case QED_IOV_VP_UPDATE_MCAST: return CHANNEL_TLV_VPORT_UPDATE_MCAST; case QED_IOV_VP_UPDATE_ACCEPT_PARAM: return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; case QED_IOV_VP_UPDATE_RSS: return CHANNEL_TLV_VPORT_UPDATE_RSS; case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; case QED_IOV_VP_UPDATE_SGE_TPA: return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; default: return 0; } } static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_iov_vf_mbx *p_mbx, u8 status, u16 tlvs_mask, u16 tlvs_accepted) { struct pfvf_def_resp_tlv *resp; u16 size, total_len, i; memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); p_mbx->offset = (u8 *)p_mbx->reply_virt; size = sizeof(struct pfvf_def_resp_tlv); total_len = size; qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); /* Prepare response for all extended tlvs if they are found by PF */ for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { if (!(tlvs_mask & BIT(i))) continue; resp = qed_add_tlv(p_hwfn, &p_mbx->offset, qed_iov_vport_to_tlv(p_hwfn, i), size); if (tlvs_accepted & BIT(i)) resp->hdr.status = status; else resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] - vport_update response: TLV %d, status %02x\n", p_vf->relative_vf_id, qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); total_len += size; } qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); return total_len; } static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf_info, u16 type, u16 length, u8 status) { struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; mbx->offset = (u8 *)mbx->reply_virt; qed_add_tlv(p_hwfn, &mbx->offset, type, length); qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); } static struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, u16 relative_vf_id, bool b_enabled_only) { struct qed_vf_info *vf = NULL; vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); if (!vf) return NULL; return &vf->p_vf_info; } static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) { struct qed_public_vf_info *vf_info; vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false); if (!vf_info) return; /* Clear the VF mac */ eth_zero_addr(vf_info->mac); vf_info->rx_accept_mode = 0; vf_info->tx_accept_mode = 0; } static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { u32 i, j; p_vf->vf_bulletin = 0; p_vf->vport_instance = 0; p_vf->configured_features = 0; /* If VF previously requested less resources, go back to default */ p_vf->num_rxqs = p_vf->num_sbs; p_vf->num_txqs = p_vf->num_sbs; p_vf->num_active_rxqs = 0; for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) { if 
(!p_queue->cids[j].p_cid) continue; qed_eth_queue_cid_release(p_hwfn, p_queue->cids[j].p_cid); p_queue->cids[j].p_cid = NULL; } } memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); } /* Returns either 0, or log(size) */ static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE); if (val) return val + 11; return 0; } static void qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf, struct vf_pf_resc_request *p_req, struct pf_vf_resc *p_resp) { u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons; u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) - qed_db_addr_vf(0, DQ_DEMS_LEGACY); u32 bar_size; p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons); /* If VF didn't bother asking for QIDs than don't bother limiting * number of CIDs. The VF doesn't care about the number, and this * has the likely result of causing an additional acquisition. */ if (!(p_vf->acquire.vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)) return; /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount * that would make sure doorbells for all CIDs fall within the bar. * If it doesn't, make sure regview window is sufficient. */ if (p_vf->acquire.vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PHYSICAL_BAR) { bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); if (bar_size) bar_size = 1 << bar_size; if (p_hwfn->cdev->num_hwfns > 1) bar_size /= 2; } else { bar_size = PXP_VF_BAR0_DQ_LENGTH; } if (bar_size / db_size < 256) p_resp->num_cids = min_t(u8, p_resp->num_cids, (u8)(bar_size / db_size)); } static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf, struct vf_pf_resc_request *p_req, struct pf_vf_resc *p_resp) { u8 i; /* Queue related information */ p_resp->num_rxqs = p_vf->num_rxqs; p_resp->num_txqs = p_vf->num_txqs; p_resp->num_sbs = p_vf->num_sbs; for (i = 0; i < p_resp->num_sbs; i++) { p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; p_resp->hw_sbs[i].sb_qid = 0; } /* These fields are filled for backward compatibility. * Unused by modern vfs. */ for (i = 0; i < p_resp->num_rxqs; i++) { qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, (u16 *)&p_resp->hw_qid[i]); p_resp->cid[i] = i; } /* Filter related information */ p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, p_req->num_mac_filters); p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, p_req->num_vlan_filters); qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); /* This isn't really needed/enforced, but some legacy VFs might depend * on the correct filling of this field. 
*/ p_resp->num_mc_filters = QED_MAX_MC_ADDRS; /* Validate sufficient resources for VF */ if (p_resp->num_rxqs < p_req->num_rxqs || p_resp->num_txqs < p_req->num_txqs || p_resp->num_sbs < p_req->num_sbs || p_resp->num_mac_filters < p_req->num_mac_filters || p_resp->num_vlan_filters < p_req->num_vlan_filters || p_resp->num_mc_filters < p_req->num_mc_filters || p_resp->num_cids < p_req->num_cids) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n", p_vf->abs_vf_id, p_req->num_rxqs, p_resp->num_rxqs, p_req->num_rxqs, p_resp->num_txqs, p_req->num_sbs, p_resp->num_sbs, p_req->num_mac_filters, p_resp->num_mac_filters, p_req->num_vlan_filters, p_resp->num_vlan_filters, p_req->num_mc_filters, p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids); /* Some legacy OSes are incapable of correctly handling this * failure. */ if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == ETH_HSI_VER_NO_PKT_LEN_TUNN) && (p_vf->acquire.vfdev_info.os_type == VFPF_ACQUIRE_OS_WINDOWS)) return PFVF_STATUS_SUCCESS; return PFVF_STATUS_NO_RESOURCE; } return PFVF_STATUS_SUCCESS; } static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn, struct pfvf_stats_info *p_stats) { p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat); p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat); p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat); p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); p_stats->tstats.address = 0; p_stats->tstats.len = 0; } static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; struct pf_vf_resc *resc = &resp->resc; int rc; memset(resp, 0, sizeof(*resp)); /* Write the PF version so that VF would know which version * is supported - might be later overridden. This guarantees that * VF could recognize legacy PF based on lack of versions in reply. 
*/ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; if (vf->state != VF_FREE && vf->state != VF_STOPPED) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", vf->abs_vf_id, vf->state); goto out; } /* Validate FW compatibility */ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI) { struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] is pre-fastpath HSI\n", vf->abs_vf_id); p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; } else { DP_INFO(p_hwfn, "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", vf->abs_vf_id, req->vfdev_info.eth_fp_hsi_major, req->vfdev_info.eth_fp_hsi_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); goto out; } } /* On 100g PFs, prevent old VFs from loading */ if ((p_hwfn->cdev->num_hwfns > 1) && !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { DP_INFO(p_hwfn, "VF[%d] is running an old driver that doesn't support 100g\n", vf->abs_vf_id); goto out; } /* Store the acquire message */ memcpy(&vf->acquire, req, sizeof(vf->acquire)); vf->opaque_fid = req->vfdev_info.opaque_fid; vf->vf_bulletin = req->bulletin_addr; vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? vf->bulletin.size : req->bulletin_size; /* fill in pfdev info */ pfdev_info->chip_num = p_hwfn->cdev->chip_num; pfdev_info->db_size = 0; pfdev_info->indices_per_sb = PIS_PER_SB; pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; if (p_hwfn->cdev->num_hwfns > 1) pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; /* Share our ability to use multiple queue-ids only with VFs * that request it. */ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; /* Share the sizes of the bars with VF */ resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); pfdev_info->fw_major = FW_MAJOR_VERSION; pfdev_info->fw_minor = FW_MINOR_VERSION; pfdev_info->fw_rev = FW_REVISION_VERSION; pfdev_info->fw_eng = FW_ENGINEERING_VERSION; /* Incorrect when legacy, but doesn't matter as legacy isn't reading * this field. */ pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, req->vfdev_info.eth_fp_hsi_minor); pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); pfdev_info->dev_type = p_hwfn->cdev->type; pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; /* Fill resources available to VF; Make sure there are enough to * satisfy the VF's request. 
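 * A shortfall fails the ACQUIRE with PFVF_STATUS_NO_RESOURCE, except for
 * legacy Windows VFs that cannot handle that status and are answered
 * with SUCCESS instead.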
*/ vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, &req->resc_request, resc); if (vfpf_status != PFVF_STATUS_SUCCESS) goto out; /* Start the VF in FW */ rc = qed_sp_vf_start(p_hwfn, vf); if (rc) { DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); vfpf_status = PFVF_STATUS_FAILURE; goto out; } /* Fill agreed size of bulletin board in response */ resp->bulletin_size = vf->bulletin.size; qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", vf->abs_vf_id, resp->pfdev_info.chip_num, resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb, resp->pfdev_info.capabilities, resc->num_rxqs, resc->num_txqs, resc->num_sbs, resc->num_mac_filters, resc->num_vlan_filters); vf->state = VF_ACQUIRED; /* Prepare Response */ out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); } static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, bool val) { struct qed_sp_vport_update_params params; int rc; if (val == p_vf->spoof_chk) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Spoofchk value[%d] is already configured\n", val); return 0; } memset(&params, 0, sizeof(struct qed_sp_vport_update_params)); params.opaque_fid = p_vf->opaque_fid; params.vport_id = p_vf->vport_id; params.update_anti_spoofing_en_flg = 1; params.anti_spoofing_en = val; rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL); if (!rc) { p_vf->spoof_chk = val; p_vf->req_spoofchk_val = p_vf->spoof_chk; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Spoofchk val[%d] configured\n", val); } else { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Spoofchk configuration[val:%d] failed for VF[%d]\n", val, p_vf->relative_vf_id); } return rc; } static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { struct qed_filter_ucast filter; int rc = 0; int i; memset(&filter, 0, sizeof(filter)); filter.is_rx_filter = 1; filter.is_tx_filter = 1; filter.vport_to_add_to = p_vf->vport_id; filter.opcode = QED_FILTER_ADD; /* Reconfigure vlans */ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { if (!p_vf->shadow_config.vlans[i].used) continue; filter.type = QED_FILTER_VLAN; filter.vlan = p_vf->shadow_config.vlans[i].vid; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", filter.vlan, p_vf->relative_vf_id); rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, &filter, QED_SPQ_MODE_CB, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to configure VLAN [%04x] to VF [%04x]\n", filter.vlan, p_vf->relative_vf_id); break; } } return rc; } static int qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, u64 events) { int rc = 0; if ((events & BIT(VLAN_ADDR_FORCED)) && !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); return rc; } static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, u64 events) { int rc = 0; struct qed_filter_ucast filter; if (!p_vf->vport_instance) return -EINVAL; if ((events & BIT(MAC_ADDR_FORCED)) || p_vf->p_vf_info.is_trusted_configured) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it. 
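 * The MAC published in the VF bulletin is therefore (re)applied below
 * via a REPLACE unicast filter.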
*/ memset(&filter, 0, sizeof(filter)); filter.type = QED_FILTER_MAC; filter.opcode = QED_FILTER_REPLACE; filter.is_rx_filter = 1; filter.is_tx_filter = 1; filter.vport_to_add_to = p_vf->vport_id; ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, &filter, QED_SPQ_MODE_CB, NULL); if (rc) { DP_NOTICE(p_hwfn, "PF failed to configure MAC for VF\n"); return rc; } if (p_vf->p_vf_info.is_trusted_configured) p_vf->configured_features |= BIT(VFPF_BULLETIN_MAC_ADDR); else p_vf->configured_features |= BIT(MAC_ADDR_FORCED); } if (events & BIT(VLAN_ADDR_FORCED)) { struct qed_sp_vport_update_params vport_update; u8 removal; int i; memset(&filter, 0, sizeof(filter)); filter.type = QED_FILTER_VLAN; filter.is_rx_filter = 1; filter.is_tx_filter = 1; filter.vport_to_add_to = p_vf->vport_id; filter.vlan = p_vf->bulletin.p_virt->pvid; filter.opcode = filter.vlan ? QED_FILTER_REPLACE : QED_FILTER_FLUSH; /* Send the ramrod */ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, &filter, QED_SPQ_MODE_CB, NULL); if (rc) { DP_NOTICE(p_hwfn, "PF failed to configure VLAN for VF\n"); return rc; } /* Update the default-vlan & silent vlan stripping */ memset(&vport_update, 0, sizeof(vport_update)); vport_update.opaque_fid = p_vf->opaque_fid; vport_update.vport_id = p_vf->vport_id; vport_update.update_default_vlan_enable_flg = 1; vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; vport_update.update_default_vlan_flg = 1; vport_update.default_vlan = filter.vlan; vport_update.update_inner_vlan_removal_flg = 1; removal = filter.vlan ? 1 : p_vf->shadow_config.inner_vlan_removal; vport_update.inner_vlan_removal_flg = removal; vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0; rc = qed_sp_vport_update(p_hwfn, &vport_update, QED_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_NOTICE(p_hwfn, "PF failed to configure VF vport for vlan\n"); return rc; } /* Update all the Rx queues */ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; struct qed_queue_cid *p_cid = NULL; /* There can be at most 1 Rx queue on qzone. Find it */ p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); if (!p_cid) continue; rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&p_cid, 1, 0, 1, QED_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to send Rx update fo queue[0x%04x]\n", p_cid->rel.queue_id); return rc; } } if (filter.vlan) p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; else p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); } /* If forced features are terminated, we need to configure the shadow * configuration back again. 
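 * qed_iov_reconfigure_unicast_shadow() below re-adds the VLANs the VF
 * configured on its own once a forced VLAN is removed.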
*/ if (events) qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); return rc; } static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_sp_vport_start_params params = { 0 }; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct vfpf_vport_start_tlv *start; u8 status = PFVF_STATUS_SUCCESS; struct qed_vf_info *vf_info; u64 *p_bitmap; int sb_id; int rc; vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Failed to get VF info, invalid vfid [%d]\n", vf->relative_vf_id); return; } vf->state = VF_ENABLED; start = &mbx->req_virt->start_vport; qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); /* Initialize Status block in CAU */ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { if (!start->sb_addr[sb_id]) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] did not fill the address of SB %d\n", vf->relative_vf_id, sb_id); break; } qed_int_cau_conf_sb(p_hwfn, p_ptt, start->sb_addr[sb_id], vf->igu_sbs[sb_id], vf->abs_vf_id, 1); } vf->mtu = start->mtu; vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; /* Take into consideration configuration forced by hypervisor; * If none is configured, use the supplied VF values [for old * vfs that would still be fine, since they passed '0' as padding]. */ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { u8 vf_req = start->only_untagged; vf_info->bulletin.p_virt->default_only_untagged = vf_req; *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; } params.tpa_mode = start->tpa_mode; params.remove_inner_vlan = start->inner_vlan_removal; params.tx_switching = true; params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; params.drop_ttl0 = false; params.concrete_fid = vf->concrete_fid; params.opaque_fid = vf->opaque_fid; params.vport_id = vf->vport_id; params.max_buffers_per_cqe = start->max_buffers_per_cqe; params.mtu = vf->mtu; /* Non trusted VFs should enable control frame filtering */ params.check_mac = !vf->p_vf_info.is_trusted_configured; rc = qed_sp_eth_vport_start(p_hwfn, &params); if (rc) { DP_ERR(p_hwfn, "%s returned error %d\n", __func__, rc); status = PFVF_STATUS_FAILURE; } else { vf->vport_instance++; /* Force configuration if needed on the newly opened vport */ qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); } qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, sizeof(struct pfvf_def_resp_tlv), status); } static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { u8 status = PFVF_STATUS_SUCCESS; int rc; vf->vport_instance--; vf->spoof_chk = false; if ((qed_iov_validate_active_rxq(p_hwfn, vf)) || (qed_iov_validate_active_txq(p_hwfn, vf))) { vf->b_malicious = true; DP_NOTICE(p_hwfn, "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", vf->abs_vf_id); status = PFVF_STATUS_MALICIOUS; goto out; } rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); if (rc) { DP_ERR(p_hwfn, "%s returned error %d\n", __func__, rc); status = PFVF_STATUS_FAILURE; } /* Forget the configuration on the vport */ vf->configured_features = 0; memset(&vf->shadow_config, 0, sizeof(vf->shadow_config)); out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, sizeof(struct pfvf_def_resp_tlv), status); } static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, u8 
status, bool b_legacy) { struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct pfvf_start_queue_resp_tlv *p_tlv; struct vfpf_start_rxq_tlv *req; u16 length; mbx->offset = (u8 *)mbx->reply_virt; /* Taking a bigger struct instead of adding a TLV to list was a * mistake, but one which we're now stuck with, as some older * clients assume the size of the previous response. */ if (!b_legacy) length = sizeof(*p_tlv); else length = sizeof(struct pfvf_def_resp_tlv); p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, length); qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); /* Update the TLV with the response */ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { req = &mbx->req_virt->start_rxq; p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + offsetof(struct mstorm_vf_zone, non_trigger.eth_rx_queue_producers) + sizeof(struct eth_rx_prod_data) * req->rx_qid; } qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, bool b_is_tx) { struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; struct vfpf_qid_tlv *p_qid_tlv; /* Search for the qid if the VF published its going to provide it */ if (!(p_vf->acquire.vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { if (b_is_tx) return QED_IOV_LEGACY_QID_TX; else return QED_IOV_LEGACY_QID_RX; } p_qid_tlv = (struct vfpf_qid_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, CHANNEL_TLV_QID); if (!p_qid_tlv) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%2x]: Failed to provide qid\n", p_vf->relative_vf_id); return QED_IOV_QID_INVALID; } if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%02x]: Provided qid out-of-bounds %02x\n", p_vf->relative_vf_id, p_qid_tlv->qid); return QED_IOV_QID_INVALID; } return p_qid_tlv->qid; } static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_queue_start_common_params params; struct qed_queue_cid_vf_params vf_params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_NO_RESOURCE; u8 qid_usage_idx, vf_legacy = 0; struct vfpf_start_rxq_tlv *req; struct qed_vf_queue *p_queue; struct qed_queue_cid *p_cid; struct qed_sb_info sb_dummy; int rc; req = &mbx->req_virt->start_rxq; if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid, QED_IOV_VALIDATE_Q_DISABLE) || !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); if (qid_usage_idx == QED_IOV_QID_INVALID) goto out; p_queue = &vf->vf_queues[req->rx_qid]; if (p_queue->cids[qid_usage_idx].p_cid) goto out; vf_legacy = qed_vf_calculate_legacy(vf); /* Acquire a new queue-cid */ memset(&params, 0, sizeof(params)); params.queue_id = p_queue->fw_rx_qid; params.vport_id = vf->vport_id; params.stats_id = vf->abs_vf_id + 0x10; /* Since IGU index is passed via sb_info, construct a dummy one */ memset(&sb_dummy, 0, sizeof(sb_dummy)); sb_dummy.igu_sb_id = req->hw_sb; params.p_sb = &sb_dummy; params.sb_idx = req->sb_index; memset(&vf_params, 0, sizeof(vf_params)); vf_params.vfid = vf->relative_vf_id; vf_params.vf_qid = (u8)req->rx_qid; vf_params.vf_legacy = vf_legacy; vf_params.qid_usage_idx = qid_usage_idx; p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, &params, true, &vf_params); if (!p_cid) goto out; /* Legacy VFs have their Producers in a different location, which they * calculate on their own and clean the producer prior to this. 
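 * For non-legacy VFs, the PF zeroes the Rx producer in MSTORM RAM here
 * before starting the queue.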
*/ if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), 0); rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid, req->bd_max_bytes, req->rxq_addr, req->cqe_pbl_addr, req->cqe_pbl_size); if (rc) { status = PFVF_STATUS_FAILURE; qed_eth_queue_cid_release(p_hwfn, p_cid); } else { p_queue->cids[qid_usage_idx].p_cid = p_cid; p_queue->cids[qid_usage_idx].b_is_tx = false; status = PFVF_STATUS_SUCCESS; vf->num_active_rxqs++; } out: qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, !!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)); } static void qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, struct qed_tunnel_info *p_tun, u16 tunn_feature_mask) { p_resp->tunn_feature_mask = tunn_feature_mask; p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled; p_resp->vxlan_clss = p_tun->vxlan.tun_cls; p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; p_resp->geneve_udp_port = p_tun->geneve_port.port; p_resp->vxlan_udp_port = p_tun->vxlan_port.port; } static void __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, struct qed_tunn_update_type *p_tun, enum qed_tunn_mode mask, u8 tun_cls) { if (p_req->tun_mode_update_mask & BIT(mask)) { p_tun->b_update_mode = true; if (p_req->tunn_mode & BIT(mask)) p_tun->b_mode_enabled = true; } p_tun->tun_cls = tun_cls; } static void qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, struct qed_tunn_update_type *p_tun, struct qed_tunn_update_udp_port *p_port, enum qed_tunn_mode mask, u8 tun_cls, u8 update_port, u16 port) { if (update_port) { p_port->b_update_port = true; p_port->port = port; } __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); } static bool qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) { bool b_update_requested = false; if (p_req->tun_mode_update_mask || p_req->update_tun_cls || p_req->update_geneve_port || p_req->update_vxlan_port) b_update_requested = true; return b_update_requested; } static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc) { if (tun->b_update_mode && !tun->b_mode_enabled) { tun->b_update_mode = false; *rc = -EINVAL; } } static int qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn, u16 *tun_features, bool *update, struct qed_tunnel_info *tun_src) { struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; u16 bultn_vxlan_port, bultn_geneve_port; void *cookie = p_hwfn->cdev->ops_cookie; int i, rc = 0; *tun_features = p_hwfn->cdev->tunn_feature_mask; bultn_vxlan_port = tun->vxlan_port.port; bultn_geneve_port = tun->geneve_port.port; qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 
tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { tun_src->b_update_rx_cls = false; tun_src->b_update_tx_cls = false; rc = -EINVAL; } if (tun_src->vxlan_port.b_update_port) { if (tun_src->vxlan_port.port == tun->vxlan_port.port) { tun_src->vxlan_port.b_update_port = false; } else { *update = true; bultn_vxlan_port = tun_src->vxlan_port.port; } } if (tun_src->geneve_port.b_update_port) { if (tun_src->geneve_port.port == tun->geneve_port.port) { tun_src->geneve_port.b_update_port = false; } else { *update = true; bultn_geneve_port = tun_src->geneve_port.port; } } qed_for_each_vf(p_hwfn, i) { qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port, bultn_geneve_port); } qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); return rc; } static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf) { struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; struct pfvf_update_tunn_param_tlv *p_resp; struct vfpf_update_tunn_param_tlv *p_req; u8 status = PFVF_STATUS_SUCCESS; bool b_update_required = false; struct qed_tunnel_info tunn; u16 tunn_feature_mask = 0; int i, rc = 0; mbx->offset = (u8 *)mbx->reply_virt; memset(&tunn, 0, sizeof(tunn)); p_req = &mbx->req_virt->tunn_param_update; if (!qed_iov_pf_validate_tunn_param(p_req)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No tunnel update requested by VF\n"); status = PFVF_STATUS_FAILURE; goto send_resp; } tunn.b_update_rx_cls = p_req->update_tun_cls; tunn.b_update_tx_cls = p_req->update_tun_cls; qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, p_req->update_vxlan_port, p_req->vxlan_port); qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, QED_MODE_L2GENEVE_TUNN, p_req->l2geneve_clss, p_req->update_geneve_port, p_req->geneve_port); __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, QED_MODE_IPGENEVE_TUNN, p_req->ipgeneve_clss); __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre, QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre, QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); /* If PF modifies VF's req then it should * still return an error in case of partial configuration * or modified configuration as opposed to requested one. */ rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask, &b_update_required, &tunn); if (rc) status = PFVF_STATUS_FAILURE; /* If QED client is willing to update anything ? 
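 * Only send the tunnel-config ramrod and refresh the VF bulletins when
 * an actual change was detected above.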
*/ if (b_update_required) { u16 geneve_port; rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn, QED_SPQ_MODE_EBLOCK, NULL); if (rc) status = PFVF_STATUS_FAILURE; geneve_port = p_tun->geneve_port.port; qed_for_each_vf(p_hwfn, i) { qed_iov_bulletin_set_udp_ports(p_hwfn, i, p_tun->vxlan_port.port, geneve_port); } } send_resp: p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); } static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf, u32 cid, u8 status) { struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; struct pfvf_start_queue_resp_tlv *p_tlv; bool b_legacy = false; u16 length; mbx->offset = (u8 *)mbx->reply_virt; /* Taking a bigger struct instead of adding a TLV to list was a * mistake, but one which we're now stuck with, as some older * clients assume the size of the previous response. */ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == ETH_HSI_VER_NO_PKT_LEN_TUNN) b_legacy = true; if (!b_legacy) length = sizeof(*p_tlv); else length = sizeof(struct pfvf_def_resp_tlv); p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, length); qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); /* Update the TLV with the response */ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); } static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_queue_start_common_params params; struct qed_queue_cid_vf_params vf_params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_NO_RESOURCE; struct vfpf_start_txq_tlv *req; struct qed_vf_queue *p_queue; struct qed_queue_cid *p_cid; struct qed_sb_info sb_dummy; u8 qid_usage_idx, vf_legacy; u32 cid = 0; int rc; u16 pq; memset(&params, 0, sizeof(params)); req = &mbx->req_virt->start_txq; if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, QED_IOV_VALIDATE_Q_NA) || !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); if (qid_usage_idx == QED_IOV_QID_INVALID) goto out; p_queue = &vf->vf_queues[req->tx_qid]; if (p_queue->cids[qid_usage_idx].p_cid) goto out; vf_legacy = qed_vf_calculate_legacy(vf); /* Acquire a new queue-cid */ params.queue_id = p_queue->fw_tx_qid; params.vport_id = vf->vport_id; params.stats_id = vf->abs_vf_id + 0x10; /* Since IGU index is passed via sb_info, construct a dummy one */ memset(&sb_dummy, 0, sizeof(sb_dummy)); sb_dummy.igu_sb_id = req->hw_sb; params.p_sb = &sb_dummy; params.sb_idx = req->sb_index; memset(&vf_params, 0, sizeof(vf_params)); vf_params.vfid = vf->relative_vf_id; vf_params.vf_qid = (u8)req->tx_qid; vf_params.vf_legacy = vf_legacy; vf_params.qid_usage_idx = qid_usage_idx; p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, &params, false, &vf_params); if (!p_cid) goto out; pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id); rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, req->pbl_addr, req->pbl_size, pq); if (rc) { status = PFVF_STATUS_FAILURE; qed_eth_queue_cid_release(p_hwfn, p_cid); } else { status = PFVF_STATUS_SUCCESS; p_queue->cids[qid_usage_idx].p_cid = p_cid; p_queue->cids[qid_usage_idx].b_is_tx = true; cid = 
p_cid->cid; } out: qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status); } static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, u16 rxq_id, u8 qid_usage_idx, bool cqe_completion) { struct qed_vf_queue *p_queue; int rc = 0; if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", vf->relative_vf_id, rxq_id, qid_usage_idx); return -EINVAL; } p_queue = &vf->vf_queues[rxq_id]; /* We've validated the index and the existence of the active RXQ - * now we need to make sure that it's using the correct qid. */ if (!p_queue->cids[qid_usage_idx].p_cid || p_queue->cids[qid_usage_idx].b_is_tx) { struct qed_queue_cid *p_cid; p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", vf->relative_vf_id, rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx); return -EINVAL; } /* Now that we know we have a valid Rx-queue - close it */ rc = qed_eth_rx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid, false, cqe_completion); if (rc) return rc; p_queue->cids[qid_usage_idx].p_cid = NULL; vf->num_active_rxqs--; return 0; } static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, u16 txq_id, u8 qid_usage_idx) { struct qed_vf_queue *p_queue; int rc = 0; if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA)) return -EINVAL; p_queue = &vf->vf_queues[txq_id]; if (!p_queue->cids[qid_usage_idx].p_cid || !p_queue->cids[qid_usage_idx].b_is_tx) return -EINVAL; rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid); if (rc) return rc; p_queue->cids[qid_usage_idx].p_cid = NULL; return 0; } static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { u16 length = sizeof(struct pfvf_def_resp_tlv); struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_FAILURE; struct vfpf_stop_rxqs_tlv *req; u8 qid_usage_idx; int rc; /* There has never been an official driver that used this interface * for stopping multiple queues, and it is now considered deprecated. * Validate this isn't used here. */ req = &mbx->req_virt->stop_rxqs; if (req->num_rxqs != 1) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Odd; VF[%d] tried stopping multiple Rx queues\n", vf->relative_vf_id); status = PFVF_STATUS_NOT_SUPPORTED; goto out; } /* Find which qid-index is associated with the queue */ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); if (qid_usage_idx == QED_IOV_QID_INVALID) goto out; rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, qid_usage_idx, req->cqe_completion); if (!rc) status = PFVF_STATUS_SUCCESS; out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, length, status); } static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { u16 length = sizeof(struct pfvf_def_resp_tlv); struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; u8 status = PFVF_STATUS_FAILURE; struct vfpf_stop_txqs_tlv *req; u8 qid_usage_idx; int rc; /* There has never been an official driver that used this interface * for stopping multiple queues, and it is now considered deprecated. * Validate this isn't used here. 
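 * A request for anything other than a single Tx queue is answered with
 * PFVF_STATUS_NOT_SUPPORTED.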
*/ req = &mbx->req_virt->stop_txqs; if (req->num_txqs != 1) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Odd; VF[%d] tried stopping multiple Tx queues\n", vf->relative_vf_id); status = PFVF_STATUS_NOT_SUPPORTED; goto out; } /* Find which qid-index is associated with the queue */ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); if (qid_usage_idx == QED_IOV_QID_INVALID) goto out; rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx); if (!rc) status = PFVF_STATUS_SUCCESS; out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, length, status); } static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF]; u16 length = sizeof(struct pfvf_def_resp_tlv); struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct vfpf_update_rxq_tlv *req; u8 status = PFVF_STATUS_FAILURE; u8 complete_event_flg; u8 complete_cqe_flg; u8 qid_usage_idx; int rc; u8 i; req = &mbx->req_virt->update_rxq; complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); if (qid_usage_idx == QED_IOV_QID_INVALID) goto out; /* There shouldn't exist a VF that uses queue-qids yet uses this * API with multiple Rx queues. Validate this. */ if ((vf->acquire.vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] supports QIDs but sends multiple queues\n", vf->relative_vf_id); goto out; } /* Validate inputs - for the legacy case this is still true since * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. */ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { if (!qed_iov_validate_rxq(p_hwfn, vf, i, QED_IOV_VALIDATE_Q_NA) || !vf->vf_queues[i].cids[qid_usage_idx].p_cid || vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", vf->relative_vf_id, req->rx_qid, req->num_rxqs); goto out; } } /* Prepare the handlers */ for (i = 0; i < req->num_rxqs; i++) { u16 qid = req->rx_qid + i; handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid; } rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, req->num_rxqs, complete_cqe_flg, complete_event_flg, QED_SPQ_MODE_EBLOCK, NULL); if (rc) goto out; status = PFVF_STATUS_SUCCESS; out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, length, status); } void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type) { struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; int len = 0; do { if (!p_tlv->length) { DP_NOTICE(p_hwfn, "Zero length TLV found\n"); return NULL; } if (p_tlv->type == req_type) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Extended tlv type %d, length %d found\n", p_tlv->type, p_tlv->length); return p_tlv; } len += p_tlv->length; p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n"); return NULL; } } while (p_tlv->type != CHANNEL_TLV_LIST_END); return NULL; } static void qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_data, struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) { struct vfpf_vport_update_activate_tlv *p_act_tlv; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; p_act_tlv = (struct vfpf_vport_update_activate_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); if (!p_act_tlv) return; 
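	/* Mirror the VF's activate request into the vport-update ramrod
	 * parameters.
	 */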
p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; p_data->vport_active_rx_flg = p_act_tlv->active_rx; p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; p_data->vport_active_tx_flg = p_act_tlv->active_tx; *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; } static void qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_data, struct qed_vf_info *p_vf, struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) { struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); if (!p_vlan_tlv) return; p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; /* Ignore the VF request if we're forcing a vlan */ if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { p_data->update_inner_vlan_removal_flg = 1; p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; } *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; } static void qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_data, struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) { struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); if (!p_tx_switch_tlv) return; p_data->update_tx_switching_flg = 1; p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; } static void qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_data, struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) { struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); if (!p_mcast_tlv) return; p_data->update_approx_mcast_flg = 1; memcpy(p_data->bins, p_mcast_tlv->bins, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; } static void qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_data, struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) { struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); if (!p_accept_tlv) return; p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; } static void qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_data, struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) { struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); if (!p_accept_any_vlan) return; p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; p_data->update_accept_any_vlan_flg = p_accept_any_vlan->update_accept_any_vlan_flg; *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; } 
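/* Translate the VF's RSS TLV into PF-side qed_rss_params. The VF's
 * indirection table carries VF-relative Rx queue indices; each entry is
 * validated and replaced by its queue-cid before being handed to the
 * vport-update ramrod. A single invalid index rejects the whole TLV:
 * its bit is still set in tlvs_mask (so the VF gets a per-TLV reply)
 * but left clear in tlvs_accepted.
 */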
static void qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, struct qed_sp_vport_update_params *p_data, struct qed_rss_params *p_rss, struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask, u16 *tlvs_accepted) { struct vfpf_vport_update_rss_tlv *p_rss_tlv; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; bool b_reject = false; u16 table_size; u16 i, q_idx; p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); if (!p_rss_tlv) { p_data->rss_params = NULL; return; } memset(p_rss, 0, sizeof(struct qed_rss_params)); p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_CONFIG_FLAG); p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_CAPS_FLAG); p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_IND_TABLE_FLAG); p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG); p_rss->rss_enable = p_rss_tlv->rss_enable; p_rss->rss_eng_id = vf->relative_vf_id + 1; p_rss->rss_caps = p_rss_tlv->rss_caps; p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), (1 << p_rss_tlv->rss_table_size_log)); for (i = 0; i < table_size; i++) { struct qed_queue_cid *p_cid; q_idx = p_rss_tlv->rss_ind_table[i]; if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx, QED_IOV_VALIDATE_Q_ENABLE)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Omitting RSS due to wrong queue %04x\n", vf->relative_vf_id, q_idx); b_reject = true; goto out; } p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]); p_rss->rss_ind_table[i] = p_cid; } p_data->rss_params = p_rss; out: *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; if (!b_reject) *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS; } static void qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf, struct qed_sp_vport_update_params *p_data, struct qed_sge_tpa_params *p_sge_tpa, struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) { struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); if (!p_sge_tpa_tlv) { p_data->sge_tpa_params = NULL; return; } memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); p_sge_tpa->update_tpa_en_flg = !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); p_sge_tpa->update_tpa_param_flg = !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_PARAM_FLAG); p_sge_tpa->tpa_ipv4_en_flg = !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); p_sge_tpa->tpa_ipv6_en_flg = !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); p_sge_tpa->tpa_pkt_split_flg = !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); p_sge_tpa->tpa_hdr_data_split_flg = !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); p_sge_tpa->tpa_gro_consistent_flg = !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; p_data->sge_tpa_params = p_sge_tpa; *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; } static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, u8 vfid, struct 
qed_sp_vport_update_params *params, u16 *tlvs) { u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; struct qed_filter_accept_flags *flags = &params->accept_flags; struct qed_public_vf_info *vf_info; u16 tlv_mask; tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) | BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN); /* Untrusted VFs can't even be trusted to know that fact. * Simply indicate everything is configured fine, and trace * configuration 'behind their back'. */ if (!(*tlvs & tlv_mask)) return 0; vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); if (flags->update_rx_mode_config) { vf_info->rx_accept_mode = flags->rx_accept_filter; if (!vf_info->is_trusted_configured) flags->rx_accept_filter &= ~mask; } if (flags->update_tx_mode_config) { vf_info->tx_accept_mode = flags->tx_accept_filter; if (!vf_info->is_trusted_configured) flags->tx_accept_filter &= ~mask; } if (params->update_accept_any_vlan_flg) { vf_info->accept_any_vlan = params->accept_any_vlan; if (vf_info->forced_vlan && !vf_info->is_trusted_configured) params->accept_any_vlan = false; } return 0; } static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_rss_params *p_rss_params = NULL; struct qed_sp_vport_update_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct qed_sge_tpa_params sge_tpa_params; u16 tlvs_mask = 0, tlvs_accepted = 0; u8 status = PFVF_STATUS_SUCCESS; u16 length; int rc; /* Valiate PF can send such a request */ if (!vf->vport_instance) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No VPORT instance available for VF[%d], failing vport update\n", vf->abs_vf_id); status = PFVF_STATUS_FAILURE; goto out; } p_rss_params = vzalloc(sizeof(*p_rss_params)); if (!p_rss_params) { status = PFVF_STATUS_FAILURE; goto out; } memset(&params, 0, sizeof(params)); params.opaque_fid = vf->opaque_fid; params.vport_id = vf->vport_id; params.rss_params = NULL; /* Search for extended tlvs list and update values * from VF in struct qed_sp_vport_update_params. */ qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask); qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask); qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params, &sge_tpa_params, mbx, &tlvs_mask); tlvs_accepted = tlvs_mask; /* Some of the extended TLVs need to be validated first; In that case, * they can update the mask without updating the accepted [so that * PF could communicate to VF it has rejected request]. 
*/ qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params, mbx, &tlvs_mask, &tlvs_accepted); if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id, &params, &tlvs_accepted)) { tlvs_accepted = 0; status = PFVF_STATUS_NOT_SUPPORTED; goto out; } if (!tlvs_accepted) { if (tlvs_mask) DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Upper-layer prevents VF vport configuration\n"); else DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No feature tlvs found for vport update\n"); status = PFVF_STATUS_NOT_SUPPORTED; goto out; } rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL); if (rc) status = PFVF_STATUS_FAILURE; out: vfree(p_rss_params); length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, tlvs_mask, tlvs_accepted); qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_filter_ucast *p_params) { int i; /* First remove entries and then add new ones */ if (p_params->opcode == QED_FILTER_REMOVE) { for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) if (p_vf->shadow_config.vlans[i].used && p_vf->shadow_config.vlans[i].vid == p_params->vlan) { p_vf->shadow_config.vlans[i].used = false; break; } if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF [%d] - Tries to remove a non-existing vlan\n", p_vf->relative_vf_id); return -EINVAL; } } else if (p_params->opcode == QED_FILTER_REPLACE || p_params->opcode == QED_FILTER_FLUSH) { for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) p_vf->shadow_config.vlans[i].used = false; } /* In forced mode, we're willing to remove entries - but we don't add * new ones. */ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) return 0; if (p_params->opcode == QED_FILTER_ADD || p_params->opcode == QED_FILTER_REPLACE) { for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { if (p_vf->shadow_config.vlans[i].used) continue; p_vf->shadow_config.vlans[i].used = true; p_vf->shadow_config.vlans[i].vid = p_params->vlan; break; } if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF [%d] - Tries to configure more than %d vlan filters\n", p_vf->relative_vf_id, QED_ETH_VF_NUM_VLAN_FILTERS + 1); return -EINVAL; } } return 0; } static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_filter_ucast *p_params) { int i; /* If we're in forced-mode, we don't allow any change */ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) return 0; /* Don't keep track of shadow copy since we don't intend to restore. 
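 * Trusted VFs get their MAC through the bulletin board instead
 * (see qed_iov_chk_ucast()).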
*/ if (p_vf->p_vf_info.is_trusted_configured) return 0; /* First remove entries and then add new ones */ if (p_params->opcode == QED_FILTER_REMOVE) { for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(p_vf->shadow_config.macs[i], p_params->mac)) { eth_zero_addr(p_vf->shadow_config.macs[i]); break; } } if (i == QED_ETH_VF_NUM_MAC_FILTERS) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "MAC isn't configured\n"); return -EINVAL; } } else if (p_params->opcode == QED_FILTER_REPLACE || p_params->opcode == QED_FILTER_FLUSH) { for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) eth_zero_addr(p_vf->shadow_config.macs[i]); } /* List the new MAC address */ if (p_params->opcode != QED_FILTER_ADD && p_params->opcode != QED_FILTER_REPLACE) return 0; for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { ether_addr_copy(p_vf->shadow_config.macs[i], p_params->mac); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Added MAC at %d entry in shadow\n", i); break; } } if (i == QED_ETH_VF_NUM_MAC_FILTERS) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n"); return -EINVAL; } return 0; } static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_filter_ucast *p_params) { int rc = 0; if (p_params->type == QED_FILTER_MAC) { rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); if (rc) return rc; } if (p_params->type == QED_FILTER_VLAN) rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); return rc; } static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, int vfid, struct qed_filter_ucast *params) { struct qed_public_vf_info *vf; vf = qed_iov_get_public_vf_info(hwfn, vfid, true); if (!vf) return -EINVAL; /* No real decision to make; Store the configured MAC */ if (params->type == QED_FILTER_MAC || params->type == QED_FILTER_MAC_VLAN) { ether_addr_copy(vf->mac, params->mac); if (vf->is_trusted_configured) { qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid); /* Update and post bulleitin again */ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); } } return 0; } static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct vfpf_ucast_filter_tlv *req; u8 status = PFVF_STATUS_SUCCESS; struct qed_filter_ucast params; int rc; /* Prepare the unicast filter params */ memset(&params, 0, sizeof(struct qed_filter_ucast)); req = &mbx->req_virt->ucast_filter; params.opcode = (enum qed_filter_opcode)req->opcode; params.type = (enum qed_filter_ucast_type)req->type; params.is_rx_filter = 1; params.is_tx_filter = 1; params.vport_to_remove_from = vf->vport_id; params.vport_to_add_to = vf->vport_id; memcpy(params.mac, req->mac, ETH_ALEN); params.vlan = req->vlan; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n", vf->abs_vf_id, params.opcode, params.type, params.is_rx_filter ? "RX" : "", params.is_tx_filter ? 
"TX" : "", params.vport_to_add_to, params.mac, params.vlan); if (!vf->vport_instance) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", vf->abs_vf_id); status = PFVF_STATUS_FAILURE; goto out; } /* Update shadow copy of the VF configuration */ if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) { status = PFVF_STATUS_FAILURE; goto out; } /* Determine if the unicast filtering is acceptible by PF */ if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && (params.type == QED_FILTER_VLAN || params.type == QED_FILTER_MAC_VLAN)) { /* Once VLAN is forced or PVID is set, do not allow * to add/replace any further VLANs. */ if (params.opcode == QED_FILTER_ADD || params.opcode == QED_FILTER_REPLACE) status = PFVF_STATUS_FORCED; goto out; } if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && (params.type == QED_FILTER_MAC || params.type == QED_FILTER_MAC_VLAN)) { if (!ether_addr_equal(p_bulletin->mac, params.mac) || (params.opcode != QED_FILTER_ADD && params.opcode != QED_FILTER_REPLACE)) status = PFVF_STATUS_FORCED; goto out; } rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params); if (rc) { status = PFVF_STATUS_FAILURE; goto out; } rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params, QED_SPQ_MODE_CB, NULL); if (rc) status = PFVF_STATUS_FAILURE; out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, sizeof(struct pfvf_def_resp_tlv), status); } static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { int i; /* Reset the SBs */ for (i = 0; i < vf->num_sbs; i++) qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, vf->igu_sbs[i], vf->opaque_fid, false); qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, sizeof(struct pfvf_def_resp_tlv), PFVF_STATUS_SUCCESS); } static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { u16 length = sizeof(struct pfvf_def_resp_tlv); u8 status = PFVF_STATUS_SUCCESS; /* Disable Interrupts for VF */ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); /* Reset Permission table */ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, length, status); } static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf) { u16 length = sizeof(struct pfvf_def_resp_tlv); u8 status = PFVF_STATUS_SUCCESS; int rc = 0; qed_iov_vf_cleanup(p_hwfn, p_vf); if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { /* Stopping the VF */ rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, p_vf->opaque_fid); if (rc) { DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", rc); status = PFVF_STATUS_FAILURE; } p_vf->state = VF_STOPPED; } qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, length, status); } static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf) { struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; struct pfvf_read_coal_resp_tlv *p_resp; struct vfpf_read_coal_req_tlv *req; u8 status = PFVF_STATUS_FAILURE; struct qed_vf_queue *p_queue; struct qed_queue_cid *p_cid; u16 coal = 0, qid, i; bool b_is_rx; int rc = 0; mbx->offset = (u8 *)mbx->reply_virt; req = &mbx->req_virt->read_coal_req; qid = req->qid; b_is_rx = req->is_rx ? 
true : false; if (b_is_rx) { if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, QED_IOV_VALIDATE_Q_ENABLE)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Invalid Rx queue_id = %d\n", p_vf->abs_vf_id, qid); goto send_resp; } p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); if (rc) goto send_resp; } else { if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, QED_IOV_VALIDATE_Q_ENABLE)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Invalid Tx queue_id = %d\n", p_vf->abs_vf_id, qid); goto send_resp; } for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { p_queue = &p_vf->vf_queues[qid]; if ((!p_queue->cids[i].p_cid) || (!p_queue->cids[i].b_is_tx)) continue; p_cid = p_queue->cids[i].p_cid; rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal); if (rc) goto send_resp; break; } } status = PFVF_STATUS_SUCCESS; send_resp: p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ, sizeof(*p_resp)); p_resp->coal = coal; qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); } static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; struct vfpf_update_coalesce *req; u8 status = PFVF_STATUS_FAILURE; struct qed_queue_cid *p_cid; u16 rx_coal, tx_coal; int rc = 0, i; u16 qid; req = &mbx->req_virt->update_coalesce; rx_coal = req->rx_coal; tx_coal = req->tx_coal; qid = req->qid; if (!qed_iov_validate_rxq(p_hwfn, vf, qid, QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Invalid Rx queue_id = %d\n", vf->abs_vf_id, qid); goto out; } if (!qed_iov_validate_txq(p_hwfn, vf, qid, QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Invalid Tx queue_id = %d\n", vf->abs_vf_id, qid); goto out; } DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", vf->abs_vf_id, rx_coal, tx_coal, qid); if (rx_coal) { p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Unable to set rx queue = %d coalesce\n", vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); goto out; } vf->rx_coal = rx_coal; } if (tx_coal) { struct qed_vf_queue *p_queue = &vf->vf_queues[qid]; for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { if (!p_queue->cids[i].p_cid) continue; if (!p_queue->cids[i].b_is_tx) continue; rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_queue->cids[i].p_cid); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d]: Unable to set tx queue coalesce\n", vf->abs_vf_id); goto out; } } vf->tx_coal = tx_coal; } status = PFVF_STATUS_SUCCESS; out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, sizeof(struct pfvf_def_resp_tlv), status); } static int qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) { int cnt; u32 val; qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid); for (cnt = 0; cnt < 50; cnt++) { val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); if (!val) break; msleep(20); } qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); if (cnt == 50) { DP_ERR(p_hwfn, "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", p_vf->abs_vf_id, val); return -EBUSY; } return 0; } #define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS) static int qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, struct 
qed_vf_info *p_vf, struct qed_ptt *p_ptt) { u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp; u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port; u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0; u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0; u8 port_id, tc, tc_id = 0, voq = 0; int cnt; memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); /* Read initial consumers & producers */ for (port_id = 0; port_id < max_ports_per_engine; port_id++) { /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) { tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC; voq = VOQ(port_id, tc_id, max_phys_tcs_per_port); cons[voq] = qed_rd(p_hwfn, p_ptt, cons_voq0_addr + voq * 0x40); prod = qed_rd(p_hwfn, p_ptt, prod_voq0_addr + voq * 0x40); distance[voq] = prod - cons[voq]; } } /* Wait for consumers to pass the producers */ port_id = 0; tc = 0; for (cnt = 0; cnt < 50; cnt++) { for (; port_id < max_ports_per_engine; port_id++) { /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ for (; tc < max_phys_tcs_per_port + 1; tc++) { tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC; voq = VOQ(port_id, tc_id, max_phys_tcs_per_port); tmp = qed_rd(p_hwfn, p_ptt, cons_voq0_addr + voq * 0x40); if (distance[voq] > tmp - cons[voq]) break; } if (tc == max_phys_tcs_per_port + 1) tc = 0; else break; } if (port_id == max_ports_per_engine) break; msleep(20); } if (cnt == 50) { DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n", p_vf->abs_vf_id, (int)voq); DP_ERR(p_hwfn, "VOQ %d has port_id as %d and tc_id as %d]\n", (int)voq, (int)port_id, (int)tc_id); return -EBUSY; } return 0; } static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) { int rc; rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); if (rc) return rc; rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); if (rc) return rc; return 0; } static int qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 rel_vf_id, u32 *ack_vfs) { struct qed_vf_info *p_vf; int rc = 0; p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); if (!p_vf) return 0; if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & (1ULL << (rel_vf_id % 64))) { u16 vfid = p_vf->abs_vf_id; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] - Handling FLR\n", vfid); qed_iov_vf_cleanup(p_hwfn, p_vf); /* If VF isn't active, no need for anything but SW */ if (!p_vf->b_init) goto cleanup; rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); if (rc) goto cleanup; rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); if (rc) { DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid); return rc; } /* Workaround to make VF-PF channel ready, as FW * doesn't do that as a part of FLR. */ REG_WR(p_hwfn, GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, USTORM_VF_PF_CHANNEL_READY, vfid), 1); /* VF_STOPPED has to be set only after final cleanup * but prior to re-enabling the VF. 
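 * (qed_iov_vf_mbx_release() only issues a vf-stop ramrod for VFs whose
 * state is not already VF_STOPPED or VF_FREE.)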
*/ p_vf->state = VF_STOPPED; rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); if (rc) { DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n", vfid); return rc; } cleanup: /* Mark VF for ack and clean pending state */ if (p_vf->state == VF_RESET) p_vf->state = VF_STOPPED; ack_vfs[vfid / 32] |= BIT((vfid % 32)); p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64)); p_vf->vf_mbx.b_pending_msg = false; } return rc; } static int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 ack_vfs[VF_MAX_STATIC / 32]; int rc = 0; u16 i; memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); /* Since BRB <-> PRS interface can't be tested as part of the flr * polling due to HW limitations, simply sleep a bit. And since * there's no need to wait per-vf, do it before looping. */ msleep(100); for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); return rc; } bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) { bool found = false; u16 i; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); for (i = 0; i < (VF_MAX_STATIC / 32); i++) DP_VERBOSE(p_hwfn, QED_MSG_IOV, "[%08x,...,%08x]: %08x\n", i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); if (!p_hwfn->cdev->p_iov_info) { DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); return false; } /* Mark VFs */ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { struct qed_vf_info *p_vf; u8 vfid; p_vf = qed_iov_get_vf_info(p_hwfn, i, false); if (!p_vf) continue; vfid = p_vf->abs_vf_id; if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; u16 rel_vf_id = p_vf->relative_vf_id; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] [rel %d] got FLR-ed\n", vfid, rel_vf_id); p_vf->state = VF_RESET; /* No need to lock here, since pending_flr should * only change here and before ACKing MFW. Since * MFW will not trigger an additional attention for * VF flr until ACKs, we're safe.
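 * The bit set below is cleared again in qed_iov_execute_vf_flr_cleanup()
 * before qed_mcp_ack_vf_flr() acknowledges the MFW.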
*/ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); found = true; } } return found; } static int qed_iov_get_link(struct qed_hwfn *p_hwfn, u16 vfid, struct qed_mcp_link_params *p_params, struct qed_mcp_link_state *p_link, struct qed_mcp_link_capabilities *p_caps) { struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, vfid, false); struct qed_bulletin_content *p_bulletin; if (!p_vf) return -EINVAL; p_bulletin = p_vf->bulletin.p_virt; if (p_params) __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); if (p_link) __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); if (p_caps) __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); return 0; } static int qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *p_vf) { struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; struct vfpf_bulletin_update_mac_tlv *p_req; u8 status = PFVF_STATUS_SUCCESS; int rc = 0; if (!p_vf->p_vf_info.is_trusted_configured) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Blocking bulletin update request from untrusted VF[%d]\n", p_vf->abs_vf_id); status = PFVF_STATUS_NOT_SUPPORTED; rc = -EINVAL; goto send_status; } p_req = &mbx->req_virt->bulletin_update_mac; ether_addr_copy(p_bulletin->mac, p_req->mac); DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Updated bulletin of VF[%d] with requested MAC[%pM]\n", p_vf->abs_vf_id, p_req->mac); send_status: qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_BULLETIN_UPDATE_MAC, sizeof(struct pfvf_def_resp_tlv), status); return rc; } static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid) { struct qed_iov_vf_mbx *mbx; struct qed_vf_info *p_vf; p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf) return; mbx = &p_vf->vf_mbx; /* qed_iov_process_mbx_request */ if (!mbx->b_pending_msg) { DP_NOTICE(p_hwfn, "VF[%02x]: Trying to process mailbox message when none is pending\n", p_vf->abs_vf_id); return; } mbx->b_pending_msg = false; mbx->first_tlv = mbx->req_virt->first_tlv; DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%02x]: Processing mailbox message [type %04x]\n", p_vf->abs_vf_id, mbx->first_tlv.tl.type); /* check if tlv type is known */ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && !p_vf->b_malicious) { switch (mbx->first_tlv.tl.type) { case CHANNEL_TLV_ACQUIRE: qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_VPORT_START: qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_VPORT_TEARDOWN: qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_START_RXQ: qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_START_TXQ: qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_STOP_RXQS: qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_STOP_TXQS: qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_UPDATE_RXQ: qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_VPORT_UPDATE: qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_UCAST_FILTER: qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_CLOSE: qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_INT_CLEANUP: qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_RELEASE: qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_UPDATE_TUNN_PARAM: qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_COALESCE_UPDATE: 
qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_COALESCE_READ: qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); break; case CHANNEL_TLV_BULLETIN_UPDATE_MAC: qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); break; } } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", p_vf->abs_vf_id, mbx->first_tlv.tl.type); qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, mbx->first_tlv.tl.type, sizeof(struct pfvf_def_resp_tlv), PFVF_STATUS_MALICIOUS); } else { /* unknown TLV - this may belong to a VF driver from the future * - a version written after this PF driver was written, which * supports features unknown as of yet. Too bad since we don't * support them. Or this may be because someone wrote a crappy * VF driver and is sending garbage over the channel. */ DP_NOTICE(p_hwfn, "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", p_vf->abs_vf_id, mbx->first_tlv.tl.type, mbx->first_tlv.tl.length, mbx->first_tlv.padding, mbx->first_tlv.reply_address); /* Try replying in case reply address matches the acquisition's * posted address. */ if (p_vf->acquire.first_tlv.reply_address && (mbx->first_tlv.reply_address == p_vf->acquire.first_tlv.reply_address)) { qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, mbx->first_tlv.tl.type, sizeof(struct pfvf_def_resp_tlv), PFVF_STATUS_NOT_SUPPORTED); } else { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%02x]: Can't respond to TLV - no valid reply address\n", p_vf->abs_vf_id); } } } static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) { int i; memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); qed_for_each_vf(p_hwfn, i) { struct qed_vf_info *p_vf; p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; if (p_vf->vf_mbx.b_pending_msg) events[i / 64] |= 1ULL << (i % 64); } } static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, u16 abs_vfid) { u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n", abs_vfid); return NULL; } return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; } static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, u16 abs_vfid, struct regpair *vf_msg) { struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, abs_vfid); if (!p_vf) return 0; /* List the physical address of the request so that handler * could later on copy the message from it. 
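 * (qed_iov_copy_vf_msg() DMAs the request from this address into the
 * PF-side mailbox buffer before qed_iov_process_mbx_req() parses it.)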
*/ p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo); /* Mark the event and schedule the workqueue */ p_vf->vf_mbx.b_pending_msg = true; qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); return 0; } void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, struct fw_err_data *p_data) { struct qed_vf_info *p_vf; p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id (p_data->entity_id)); if (!p_vf) return; if (!p_vf->b_malicious) { DP_NOTICE(p_hwfn, "VF [%d] - Malicious behavior [%02x]\n", p_vf->abs_vf_id, p_data->err_id); p_vf->b_malicious = true; } else { DP_INFO(p_hwfn, "VF [%d] - Malicious behavior [%02x]\n", p_vf->abs_vf_id, p_data->err_id); } } int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data, u8 fw_return_code) { switch (opcode) { case COMMON_EVENT_VF_PF_CHANNEL: return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), &data->vf_pf_channel.msg_addr); default: DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", opcode); return -EINVAL; } } u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; u16 i; if (!p_iov) goto out; for (i = rel_vf_id; i < p_iov->total_vfs; i++) if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false)) return i; out: return MAX_NUM_VFS; } static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, int vfid) { struct qed_dmae_params params; struct qed_vf_info *vf_info; vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) return -EINVAL; memset(&params, 0, sizeof(params)); SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1); SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1); params.src_vfid = vf_info->abs_vf_id; if (qed_dmae_host2host(p_hwfn, ptt, vf_info->vf_mbx.pending_req, vf_info->vf_mbx.req_phys, sizeof(union vfpf_tlvs) / 4, &params)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Failed to copy message from VF 0x%02x\n", vfid); return -EIO; } return 0; } static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid) { struct qed_vf_info *vf_info; u64 feature; vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Can not set forced MAC, invalid vfid [%d]\n", vfid); return; } if (vf_info->b_malicious) { DP_NOTICE(p_hwfn->cdev, "Can't set forced MAC to malicious VF [%d]\n", vfid); return; } if (vf_info->p_vf_info.is_trusted_configured) { feature = BIT(VFPF_BULLETIN_MAC_ADDR); /* Trust mode will disable Forced MAC */ vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(MAC_ADDR_FORCED); } else { feature = BIT(MAC_ADDR_FORCED); /* Forced MAC will disable MAC_ADDR */ vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR); } memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); vf_info->bulletin.p_virt->valid_bitmap |= feature; qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); } static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid) { struct qed_vf_info *vf_info; u64 feature; vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n", vfid); return -EINVAL; } if (vf_info->b_malicious) { DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n", vfid); return -EINVAL; } if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Can not set MAC, Forced MAC is configured\n"); return -EINVAL; } feature = BIT(VFPF_BULLETIN_MAC_ADDR); 
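/* Publish the unforced MAC through the VF's bulletin board and mark the entry valid. */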
ether_addr_copy(vf_info->bulletin.p_virt->mac, mac); vf_info->bulletin.p_virt->valid_bitmap |= feature; if (vf_info->p_vf_info.is_trusted_configured) qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); return 0; } static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, u16 pvid, int vfid) { struct qed_vf_info *vf_info; u64 feature; vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Can not set forced MAC, invalid vfid [%d]\n", vfid); return; } if (vf_info->b_malicious) { DP_NOTICE(p_hwfn->cdev, "Can't set forced vlan to malicious VF [%d]\n", vfid); return; } feature = 1 << VLAN_ADDR_FORCED; vf_info->bulletin.p_virt->pvid = pvid; if (pvid) vf_info->bulletin.p_virt->valid_bitmap |= feature; else vf_info->bulletin.p_virt->valid_bitmap &= ~feature; qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); } void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid, u16 vxlan_port, u16 geneve_port) { struct qed_vf_info *vf_info; vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Can not set udp ports, invalid vfid [%d]\n", vfid); return; } if (vf_info->b_malicious) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Can not set udp ports to malicious VF [%d]\n", vfid); return; } vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; } static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf_info) return false; return !!p_vf_info->vport_instance; } static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf_info) return true; return p_vf_info->state == VF_STOPPED; } static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *vf_info; vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) return false; return vf_info->spoof_chk; } static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) { struct qed_vf_info *vf; int rc = -EINVAL; if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { DP_NOTICE(p_hwfn, "SR-IOV sanity check failed, can't set spoofchk\n"); goto out; } vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf) goto out; if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { /* After VF VPORT start PF will configure spoof check */ vf->req_spoofchk_val = val; rc = 0; goto out; } rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); out: return rc; } static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { struct qed_vf_info *p_vf; p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); if (!p_vf || !p_vf->bulletin.p_virt) return NULL; if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VFPF_BULLETIN_MAC_ADDR))) return NULL; return p_vf->bulletin.p_virt->mac; } static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { struct qed_vf_info *p_vf; p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); if (!p_vf || !p_vf->bulletin.p_virt) return NULL; if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) return NULL; return p_vf->bulletin.p_virt->mac; } static u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { struct qed_vf_info *p_vf; p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); if (!p_vf || !p_vf->bulletin.p_virt) return 0; if (!(p_vf->bulletin.p_virt->valid_bitmap & 
BIT(VLAN_ADDR_FORCED))) return 0; return p_vf->bulletin.p_virt->pvid; } static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid, int val) { struct qed_vf_info *vf; u8 abs_vp_id = 0; u16 rl_id; int rc; vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf) return -EINVAL; rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); if (rc) return rc; rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val, QM_RL_TYPE_NORMAL); } static int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) { struct qed_vf_info *vf; u8 vport_id; int i; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { DP_NOTICE(p_hwfn, "SR-IOV sanity check failed, can't set min rate\n"); return -EINVAL; } } vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); if (!vf) return -EINVAL; vport_id = vf->vport_id; return qed_configure_vport_wfq(cdev, vport_id, rate); } static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) { struct qed_wfq_data *vf_vp_wfq; struct qed_vf_info *vf_info; vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) return 0; vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; if (vf_vp_wfq->configured) return vf_vp_wfq->min_speed; else return 0; } /** * qed_schedule_iov - schedules IOV task for VF and PF * @hwfn: hardware function pointer * @flag: IOV flag for VF/PF */ void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) { /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(flag, &hwfn->iov_task_flags); /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); } void qed_vf_start_iov_wq(struct qed_dev *cdev) { int i; for_each_hwfn(cdev, i) queue_delayed_work(cdev->hwfns[i].iov_wq, &cdev->hwfns[i].iov_task, 0); } int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) { int i, j; for_each_hwfn(cdev, i) if (cdev->hwfns[i].iov_wq) flush_workqueue(cdev->hwfns[i].iov_wq); /* Mark VFs for disablement */ qed_iov_set_vfs_to_disable(cdev, true); if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) pci_disable_sriov(cdev->pdev); if (cdev->recov_in_prog) { DP_VERBOSE(cdev, QED_MSG_IOV, "Skip SRIOV disable operations in the device since a recovery is in progress\n"); goto out; } for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; struct qed_ptt *ptt = qed_ptt_acquire(hwfn); /* Failure to acquire the ptt in 100g creates an odd error * where the first engine has already relased IOV. 
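 * In that case bail out with -EBUSY instead of continuing the per-VF
 * teardown on this engine.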
*/ if (!ptt) { DP_ERR(hwfn, "Failed to acquire ptt\n"); return -EBUSY; } /* Clean WFQ db and configure equal weight for all vports */ qed_clean_wfq_db(hwfn, ptt); qed_for_each_vf(hwfn, j) { int k; if (!qed_iov_is_valid_vfid(hwfn, j, true, false)) continue; /* Wait until VF is disabled before releasing */ for (k = 0; k < 100; k++) { if (!qed_iov_is_vf_stopped(hwfn, j)) msleep(20); else break; } if (k < 100) qed_iov_release_hw_for_vf(&cdev->hwfns[i], ptt, j); else DP_ERR(hwfn, "Timeout waiting for VF's FLR to end\n"); } qed_ptt_release(hwfn, ptt); } out: qed_iov_set_vfs_to_disable(cdev, false); return 0; } static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, u16 vfid, struct qed_iov_vf_init_params *params) { u16 base, i; /* Since we have an equal resource distribution per-VF, and we assume * PF has acquired the QED_PF_L2_QUE first queues, we start setting * sequentially from there. */ base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues; params->rel_vf_id = vfid; for (i = 0; i < params->num_queues; i++) { params->req_rx_queue[i] = base + i; params->req_tx_queue[i] = base + i; } } static int qed_sriov_enable(struct qed_dev *cdev, int num) { struct qed_iov_vf_init_params params; struct qed_hwfn *hwfn; struct qed_ptt *ptt; int i, j, rc; if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { DP_NOTICE(cdev, "Can start at most %d VFs\n", RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); return -EINVAL; } memset(&params, 0, sizeof(params)); /* Initialize HW for VF access */ for_each_hwfn(cdev, j) { hwfn = &cdev->hwfns[j]; ptt = qed_ptt_acquire(hwfn); /* Make sure not to use more than 16 queues per VF */ params.num_queues = min_t(int, FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, 16); if (!ptt) { DP_ERR(hwfn, "Failed to acquire ptt\n"); rc = -EBUSY; goto err; } for (i = 0; i < num; i++) { if (!qed_iov_is_valid_vfid(hwfn, i, false, true)) continue; qed_sriov_enable_qid_config(hwfn, i, &params); rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params); if (rc) { DP_ERR(cdev, "Failed to enable VF[%d]\n", i); qed_ptt_release(hwfn, ptt); goto err; } } qed_ptt_release(hwfn, ptt); } /* Enable SRIOV PCIe functions */ rc = pci_enable_sriov(cdev->pdev, num); if (rc) { DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); goto err; } hwfn = QED_LEADING_HWFN(cdev); ptt = qed_ptt_acquire(hwfn); if (!ptt) { DP_ERR(hwfn, "Failed to acquire ptt\n"); rc = -EBUSY; goto err; } rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); if (rc) DP_INFO(cdev, "Failed to update eswitch mode\n"); qed_ptt_release(hwfn, ptt); return num; err: qed_sriov_disable(cdev, false); return rc; } static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) { if (!IS_QED_SRIOV(cdev)) { DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); return -EOPNOTSUPP; } if (num_vfs_param) return qed_sriov_enable(cdev, num_vfs_param); else return qed_sriov_disable(cdev, true); } static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) { int i; if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { DP_VERBOSE(cdev, QED_MSG_IOV, "Cannot set a VF MAC; Sriov is not enabled\n"); return -EINVAL; } if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { DP_VERBOSE(cdev, QED_MSG_IOV, "Cannot set VF[%d] MAC (VF is not active)\n", vfid); return -EINVAL; } for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; struct qed_public_vf_info *vf_info; vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); if (!vf_info) continue; /* Set the MAC, and schedule the IOV task */ if 
(vf_info->is_trusted_configured) ether_addr_copy(vf_info->mac, mac); else ether_addr_copy(vf_info->forced_mac, mac); qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); } return 0; } static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) { int i; if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { DP_VERBOSE(cdev, QED_MSG_IOV, "Cannot set a VF MAC; Sriov is not enabled\n"); return -EINVAL; } if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { DP_VERBOSE(cdev, QED_MSG_IOV, "Cannot set VF[%d] MAC (VF is not active)\n", vfid); return -EINVAL; } for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; struct qed_public_vf_info *vf_info; vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); if (!vf_info) continue; /* Set the forced vlan, and schedule the IOV task */ vf_info->forced_vlan = vid; qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); } return 0; } static int qed_get_vf_config(struct qed_dev *cdev, int vf_id, struct ifla_vf_info *ivi) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_public_vf_info *vf_info; struct qed_mcp_link_state link; u32 tx_rate; int ret; /* Sanitize request */ if (IS_VF(cdev)) return -EINVAL; if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) { DP_VERBOSE(cdev, QED_MSG_IOV, "VF index [%d] isn't active\n", vf_id); return -EINVAL; } vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); if (ret) return ret; /* Fill information about VF */ ivi->vf = vf_id; if (is_valid_ether_addr(vf_info->forced_mac)) ether_addr_copy(ivi->mac, vf_info->forced_mac); else ether_addr_copy(ivi->mac, vf_info->mac); ivi->vlan = vf_info->forced_vlan; ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id); ivi->linkstate = vf_info->link_state; tx_rate = vf_info->tx_rate; ivi->max_tx_rate = tx_rate ? tx_rate : link.speed; ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); ivi->trusted = vf_info->is_trusted_request; return 0; } void qed_inform_vf_link_state(struct qed_hwfn *hwfn) { struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev); struct qed_mcp_link_capabilities caps; struct qed_mcp_link_params params; struct qed_mcp_link_state link; int i; if (!hwfn->pf_iov_info) return; /* Update bulletin of all future possible VFs with link configuration */ for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { struct qed_public_vf_info *vf_info; vf_info = qed_iov_get_public_vf_info(hwfn, i, false); if (!vf_info) continue; /* Only hwfn0 is actually interested in the link speed. * But since only it would receive an MFW indication of link, * need to take configuration from it - otherwise things like * rate limiting for hwfn1 VF would not work. */ memcpy(&params, qed_mcp_get_link_params(lead_hwfn), sizeof(params)); memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link)); memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn), sizeof(caps)); /* Modify link according to the VF's configured link state */ switch (vf_info->link_state) { case IFLA_VF_LINK_STATE_DISABLE: link.link_up = false; break; case IFLA_VF_LINK_STATE_ENABLE: link.link_up = true; /* Set speed according to maximum supported by HW. * that is 40G for regular devices and 100G for CMT * mode devices. */ link.speed = (hwfn->cdev->num_hwfns > 1) ? 
100000 : 40000; break; default: /* In auto mode pass PF link image to VF */ break; } if (link.link_up && vf_info->tx_rate) { struct qed_ptt *ptt; int rate; rate = min_t(int, vf_info->tx_rate, link.speed); ptt = qed_ptt_acquire(hwfn); if (!ptt) { DP_NOTICE(hwfn, "Failed to acquire PTT\n"); return; } if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) { vf_info->tx_rate = rate; link.speed = rate; } qed_ptt_release(hwfn, ptt); } qed_iov_set_link(hwfn, i, &params, &link, &caps); } qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); } static int qed_set_vf_link_state(struct qed_dev *cdev, int vf_id, int link_state) { int i; /* Sanitize request */ if (IS_VF(cdev)) return -EINVAL; if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) { DP_VERBOSE(cdev, QED_MSG_IOV, "VF index [%d] isn't active\n", vf_id); return -EINVAL; } /* Handle configuration of link state */ for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; struct qed_public_vf_info *vf; vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); if (!vf) continue; if (vf->link_state == link_state) continue; vf->link_state = link_state; qed_inform_vf_link_state(&cdev->hwfns[i]); } return 0; } static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) { int i, rc = -EINVAL; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); if (rc) break; } return rc; } static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) { int i; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_public_vf_info *vf; if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { DP_NOTICE(p_hwfn, "SR-IOV sanity check failed, can't set tx rate\n"); return -EINVAL; } vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); vf->tx_rate = rate; qed_inform_vf_link_state(p_hwfn); } return 0; } static int qed_set_vf_rate(struct qed_dev *cdev, int vfid, u32 min_rate, u32 max_rate) { int rc_min = 0, rc_max = 0; if (max_rate) rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); if (min_rate) rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); if (rc_max | rc_min) return -EINVAL; return 0; } static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) { int i; for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; struct qed_public_vf_info *vf; if (!qed_iov_pf_sanity_check(hwfn, vfid)) { DP_NOTICE(hwfn, "SR-IOV sanity check failed, can't set trust\n"); return -EINVAL; } vf = qed_iov_get_public_vf_info(hwfn, vfid, true); if (vf->is_trusted_request == trust) return 0; vf->is_trusted_request = trust; qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); } return 0; } static void qed_handle_vf_msg(struct qed_hwfn *hwfn) { u64 events[QED_VF_ARRAY_LENGTH]; struct qed_ptt *ptt; int i; ptt = qed_ptt_acquire(hwfn); if (!ptt) { DP_VERBOSE(hwfn, QED_MSG_IOV, "Can't acquire PTT; re-scheduling\n"); qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); return; } qed_iov_pf_get_pending_events(hwfn, events); DP_VERBOSE(hwfn, QED_MSG_IOV, "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", events[0], events[1], events[2]); qed_for_each_vf(hwfn, i) { /* Skip VFs with no pending messages */ if (!(events[i / 64] & (1ULL << (i % 64)))) continue; DP_VERBOSE(hwfn, QED_MSG_IOV, "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); /* Copy VF's message to PF's request buffer for that VF */ if (qed_iov_copy_vf_msg(hwfn, ptt, i)) continue; qed_iov_process_mbx_req(hwfn, ptt, i); } qed_ptt_release(hwfn, ptt); } static 
bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn, u8 *mac, struct qed_public_vf_info *info) { if (info->is_trusted_configured) { if (is_valid_ether_addr(info->mac) && (!mac || !ether_addr_equal(mac, info->mac))) return true; } else { if (is_valid_ether_addr(info->forced_mac) && (!mac || !ether_addr_equal(mac, info->forced_mac))) return true; } return false; } static void qed_set_bulletin_mac(struct qed_hwfn *hwfn, struct qed_public_vf_info *info, int vfid) { if (info->is_trusted_configured) qed_iov_bulletin_set_mac(hwfn, info->mac, vfid); else qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid); } static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) { int i; qed_for_each_vf(hwfn, i) { struct qed_public_vf_info *info; bool update = false; u8 *mac; info = qed_iov_get_public_vf_info(hwfn, i, true); if (!info) continue; /* Update data on bulletin board */ if (info->is_trusted_configured) mac = qed_iov_bulletin_get_mac(hwfn, i); else mac = qed_iov_bulletin_get_forced_mac(hwfn, i); if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) { DP_VERBOSE(hwfn, QED_MSG_IOV, "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); /* Update bulletin board with MAC */ qed_set_bulletin_mac(hwfn, info, i); update = true; } if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ info->forced_vlan) { DP_VERBOSE(hwfn, QED_MSG_IOV, "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", info->forced_vlan, i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); qed_iov_bulletin_set_forced_vlan(hwfn, info->forced_vlan, i); update = true; } if (update) qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); } } static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) { struct qed_ptt *ptt; int i; ptt = qed_ptt_acquire(hwfn); if (!ptt) { DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); return; } qed_for_each_vf(hwfn, i) qed_iov_post_vf_bulletin(hwfn, i, ptt); qed_ptt_release(hwfn, ptt); } static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) { struct qed_public_vf_info *vf_info; struct qed_vf_info *vf; u8 *force_mac; int i; vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); vf = qed_iov_get_vf_info(hwfn, vf_id, true); if (!vf_info || !vf) return; /* Force MAC converted to generic MAC in case of VF trust on */ if (vf_info->is_trusted_configured && (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) { force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id); if (force_mac) { /* Clear existing shadow copy of MAC to have a clean * slate. 
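 * (The loop below zeroes the shadow_config entry matching the VF's
 * current MAC; the forced MAC then replaces vf_info->mac.)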
*/ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(vf->shadow_config.macs[i], vf_info->mac)) { eth_zero_addr(vf->shadow_config.macs[i]); DP_VERBOSE(hwfn, QED_MSG_IOV, "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", vf_info->mac, vf_id); break; } } ether_addr_copy(vf_info->mac, force_mac); eth_zero_addr(vf_info->forced_mac); vf->bulletin.p_virt->valid_bitmap &= ~BIT(MAC_ADDR_FORCED); qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); } } /* Update shadow copy with VF MAC when trust mode is turned off */ if (!vf_info->is_trusted_configured) { u8 empty_mac[ETH_ALEN]; eth_zero_addr(empty_mac); for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(vf->shadow_config.macs[i], empty_mac)) { ether_addr_copy(vf->shadow_config.macs[i], vf_info->mac); DP_VERBOSE(hwfn, QED_MSG_IOV, "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n", vf_info->mac, vf_id); break; } } /* Clear bulletin when trust mode is turned off, * to have a clean slate for next (normal) operations. */ qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id); qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); } } static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) { struct qed_sp_vport_update_params params; struct qed_filter_accept_flags *flags; struct qed_public_vf_info *vf_info; struct qed_vf_info *vf; u8 mask; int i; mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; flags = &params.accept_flags; qed_for_each_vf(hwfn, i) { /* Need to make sure current requested configuration didn't * flip so that we'll end up configuring something that's not * needed. */ vf_info = qed_iov_get_public_vf_info(hwfn, i, true); if (vf_info->is_trusted_configured == vf_info->is_trusted_request) continue; vf_info->is_trusted_configured = vf_info->is_trusted_request; /* Handle forced MAC mode */ qed_update_mac_for_vf_trust_change(hwfn, i); /* Validate that the VF has a configured vport */ vf = qed_iov_get_vf_info(hwfn, i, true); if (!vf || !vf->vport_instance) continue; memset(&params, 0, sizeof(params)); params.opaque_fid = vf->opaque_fid; params.vport_id = vf->vport_id; params.update_ctl_frame_check = 1; params.mac_chk_en = !vf_info->is_trusted_configured; params.update_accept_any_vlan_flg = 0; if (vf_info->accept_any_vlan && vf_info->forced_vlan) { params.update_accept_any_vlan_flg = 1; params.accept_any_vlan = vf_info->accept_any_vlan; } if (vf_info->rx_accept_mode & mask) { flags->update_rx_mode_config = 1; flags->rx_accept_filter = vf_info->rx_accept_mode; } if (vf_info->tx_accept_mode & mask) { flags->update_tx_mode_config = 1; flags->tx_accept_filter = vf_info->tx_accept_mode; } /* Remove if needed; Otherwise this would set the mask */ if (!vf_info->is_trusted_configured) { flags->rx_accept_filter &= ~mask; flags->tx_accept_filter &= ~mask; params.accept_any_vlan = false; } if (flags->update_rx_mode_config || flags->update_tx_mode_config || params.update_ctl_frame_check || params.update_accept_any_vlan_flg) { DP_VERBOSE(hwfn, QED_MSG_IOV, "vport update config for %s VF[abs 0x%x rel 0x%x]\n", vf_info->is_trusted_configured ? 
"trusted" : "untrusted", vf->abs_vf_id, vf->relative_vf_id); qed_sp_vport_update(hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL); } } } static void qed_iov_pf_task(struct work_struct *work) { struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, iov_task.work); int rc; if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) return; if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { struct qed_ptt *ptt = qed_ptt_acquire(hwfn); if (!ptt) { qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); return; } rc = qed_iov_vf_flr_cleanup(hwfn, ptt); if (rc) qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); qed_ptt_release(hwfn, ptt); } if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) qed_handle_vf_msg(hwfn); if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, &hwfn->iov_task_flags)) qed_handle_pf_set_vf_unicast(hwfn); if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, &hwfn->iov_task_flags)) qed_handle_bulletin_post(hwfn); if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags)) qed_iov_handle_trust_change(hwfn); } void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) { int i; for_each_hwfn(cdev, i) { if (!cdev->hwfns[i].iov_wq) continue; if (schedule_first) { qed_schedule_iov(&cdev->hwfns[i], QED_IOV_WQ_STOP_WQ_FLAG); cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); } destroy_workqueue(cdev->hwfns[i].iov_wq); } } int qed_iov_wq_start(struct qed_dev *cdev) { char name[NAME_SIZE]; int i; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; /* PFs needs a dedicated workqueue only if they support IOV. * VFs always require one. */ if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) continue; snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); p_hwfn->iov_wq = create_singlethread_workqueue(name); if (!p_hwfn->iov_wq) { DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); return -ENOMEM; } if (IS_PF(cdev)) INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); else INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); } return 0; } const struct qed_iov_hv_ops qed_iov_ops_pass = { .configure = &qed_sriov_configure, .set_mac = &qed_sriov_pf_set_mac, .set_vlan = &qed_sriov_pf_set_vlan, .get_config = &qed_get_vf_config, .set_link_state = &qed_set_vf_link_state, .set_spoof = &qed_spoof_configure, .set_rate = &qed_set_vf_rate, .set_trust = &qed_set_vf_trust, };
linux-master
drivers/net/ethernet/qlogic/qed/qed_sriov.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_rdma.h" #include "qed_reg_addr.h" #include "qed_sriov.h" /* QM constants */ #define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ /* Doorbell-Queue constants */ #define DQ_RANGE_SHIFT 4 #define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT) /* Searcher constants */ #define SRC_MIN_NUM_ELEMS 256 /* Timers constants */ #define TM_SHIFT 7 #define TM_ALIGN BIT(TM_SHIFT) #define TM_ELEM_SIZE 4 #define ILT_DEFAULT_HW_P_SIZE 4 #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET /* ILT entry structure */ #define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12) #define ILT_ENTRY_PHY_ADDR_SHIFT 0 #define ILT_ENTRY_VALID_MASK 0x1ULL #define ILT_ENTRY_VALID_SHIFT 52 #define ILT_ENTRY_IN_REGS 2 #define ILT_REG_SIZE_IN_BYTES 4 /* connection context union */ union conn_context { struct core_conn_context core_ctx; struct eth_conn_context eth_ctx; struct iscsi_conn_context iscsi_ctx; struct fcoe_conn_context fcoe_ctx; struct roce_conn_context roce_ctx; }; /* TYPE-0 task context - iSCSI, FCOE */ union type0_task_context { struct iscsi_task_context iscsi_ctx; struct fcoe_task_context fcoe_ctx; }; /* TYPE-1 task context - ROCE */ union type1_task_context { struct rdma_task_context roce_ctx; }; struct src_ent { __u8 opaque[56]; __be64 next; }; #define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ #define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12) #define CONN_CXT_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) #define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context)) #define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context)) #define TYPE0_TASK_CXT_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn) /* Alignment is inherent to the type1_task_context structure */ #define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context) static bool src_proto(enum protocol_type type) { return type == PROTOCOLID_TCP_ULP || type == PROTOCOLID_FCOE || type == PROTOCOLID_IWARP; } static bool tm_cid_proto(enum protocol_type type) { return type == PROTOCOLID_TCP_ULP || type == PROTOCOLID_FCOE || type == PROTOCOLID_ROCE || type == PROTOCOLID_IWARP; } static bool tm_tid_proto(enum protocol_type type) { return type == PROTOCOLID_FCOE; } /* counts the iids for the CDU/CDUC ILT client configuration */ struct qed_cdu_iids { u32 pf_cids; u32 per_vf_cids; }; static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr, struct qed_cdu_iids *iids) { u32 type; for (type = 0; type < MAX_CONN_TYPES; type++) { iids->pf_cids += p_mngr->conn_cfg[type].cid_count; iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf; } } /* counts the iids for the Searcher block configuration */ struct qed_src_iids { u32 pf_cids; u32 per_vf_cids; }; static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr, struct qed_src_iids *iids) { u32 i; for (i = 0; i < MAX_CONN_TYPES; i++) { if (!src_proto(i)) continue; iids->pf_cids += p_mngr->conn_cfg[i].cid_count; iids->per_vf_cids += 
p_mngr->conn_cfg[i].cids_per_vf; } /* Add L2 filtering filters in addition */ iids->pf_cids += p_mngr->arfs_count; } /* counts the iids for the Timers block configuration */ struct qed_tm_iids { u32 pf_cids; u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */ u32 pf_tids_total; u32 per_vf_cids; u32 per_vf_tids; }; static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn, struct qed_cxt_mngr *p_mngr, struct qed_tm_iids *iids) { bool tm_vf_required = false; bool tm_required = false; int i, j; /* Timers is a special case -> we don't count how many cids require * timers but what's the max cid that will be used by the timer block. * therefore we traverse in reverse order, and once we hit a protocol * that requires the timers memory, we'll sum all the protocols up * to that one. */ for (i = MAX_CONN_TYPES - 1; i >= 0; i--) { struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i]; if (tm_cid_proto(i) || tm_required) { if (p_cfg->cid_count) tm_required = true; iids->pf_cids += p_cfg->cid_count; } if (tm_cid_proto(i) || tm_vf_required) { if (p_cfg->cids_per_vf) tm_vf_required = true; iids->per_vf_cids += p_cfg->cids_per_vf; } if (tm_tid_proto(i)) { struct qed_tid_seg *segs = p_cfg->tid_seg; /* for each segment there is at most one * protocol for which count is not 0. */ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) iids->pf_tids[j] += segs[j].count; /* The last array elelment is for the VFs. As for PF * segments there can be only one protocol for * which this value is not 0. */ iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } } iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN); iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN); iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN); for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) { iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN); iids->pf_tids_total += iids->pf_tids[j]; } } static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, struct qed_qm_iids *iids) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_tid_seg *segs; u32 vf_cids = 0, type, j; u32 vf_tids = 0; for (type = 0; type < MAX_CONN_TYPES; type++) { iids->cids += p_mngr->conn_cfg[type].cid_count; vf_cids += p_mngr->conn_cfg[type].cids_per_vf; segs = p_mngr->conn_cfg[type].tid_seg; /* for each segment there is at most one * protocol for which count is not 0. */ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) iids->tids += segs[j].count; /* The last array elelment is for the VFs. As for PF * segments there can be only one protocol for * which this value is not 0. */ vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } iids->vf_cids = vf_cids; iids->tids += vf_tids * p_mngr->vf_count; DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n", iids->cids, iids->vf_cids, iids->tids, vf_tids); } static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn, u32 seg) { struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr; u32 i; /* Find the protocol with tid count > 0 for this segment. * Note: there can only be one and this is already validated. 
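 * Returns NULL if no protocol has a non-zero TID count for this segment.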
*/ for (i = 0; i < MAX_CONN_TYPES; i++) if (p_cfg->conn_cfg[i].tid_seg[seg].count) return &p_cfg->conn_cfg[i].tid_seg[seg]; return NULL; } static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs, u32 num_xrc_srqs) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; p_mgr->srq_count = num_srqs; p_mgr->xrc_srq_count = num_xrc_srqs; } u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn, enum ilt_clients ilt_client) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client]; return ILT_PAGE_IN_BYTES(p_cli->p_size.val); } static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn) { u32 page_size; page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM); return page_size / XRC_SRQ_CXT_SIZE; } u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; u32 total_srqs; total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count; return total_srqs; } /* set the iids count per protocol */ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 cid_count, u32 vf_cid_cnt) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN); if (type == PROTOCOLID_ROCE) { u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val; u32 cxt_size = CONN_CXT_SIZE(p_hwfn); u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; u32 align = elems_per_page * DQ_RANGE_ALIGN; p_conn->cid_count = roundup(p_conn->cid_count, align); } } u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *vf_cid) { if (vf_cid) *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf; return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; } u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, enum protocol_type type) { return p_hwfn->p_cxt_mngr->acquired[type].start_cid; } u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, enum protocol_type type) { u32 cnt = 0; int i; for (i = 0; i < TASK_SEGMENTS; i++) cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count; return cnt; } static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, enum protocol_type proto, u8 seg, u8 seg_type, u32 count, bool has_fl) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; p_seg->count = count; p_seg->has_fl_mem = has_fl; p_seg->type = seg_type; } static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, struct qed_ilt_cli_blk *p_blk, u32 start_line, u32 total_size, u32 elem_size) { u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); /* verify thatits called only once for each block */ if (p_blk->total_size) return; p_blk->total_size = total_size; p_blk->real_size_in_page = 0; if (elem_size) p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size; p_blk->start_line = start_line; } static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn, struct qed_ilt_client_cfg *p_cli, struct qed_ilt_cli_blk *p_blk, u32 *p_line, enum ilt_clients client_id) { if (!p_blk->total_size) return; if (!p_cli->active) p_cli->first.val = *p_line; p_cli->active = true; *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); p_cli->last.val = *p_line - 1; DP_VERBOSE(p_hwfn, QED_MSG_ILT, "ILT[Client %d] - Lines: [%08x - %08x]. 
Block - Size %08x [Real %08x] Start line %d\n", client_id, p_cli->first.val, p_cli->last.val, p_blk->total_size, p_blk->real_size_in_page, p_blk->start_line); } static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn, enum ilt_clients ilt_client) { u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count; struct qed_ilt_client_cfg *p_cli; u32 lines_to_skip = 0; u32 cxts_per_p; if (ilt_client == ILT_CLI_CDUC) { p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) / (u32) CONN_CXT_SIZE(p_hwfn); lines_to_skip = cid_count / cxts_per_p; } return lines_to_skip; } static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg *p_cli) { p_cli->active = false; p_cli->first.val = 0; p_cli->last.val = 0; return p_cli; } static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk) { p_blk->total_size = 0; return p_blk; } static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; u32 cli_idx, blk_idx; for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) { for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++) clients[cli_idx].pf_blks[blk_idx].total_size = 0; for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++) clients[cli_idx].vf_blks[blk_idx].total_size = 0; } } int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 curr_line, total, i, task_size, line; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; struct qed_cdu_iids cdu_iids; struct qed_src_iids src_iids; struct qed_qm_iids qm_iids; struct qed_tm_iids tm_iids; struct qed_tid_seg *p_seg; memset(&qm_iids, 0, sizeof(qm_iids)); memset(&cdu_iids, 0, sizeof(cdu_iids)); memset(&src_iids, 0, sizeof(src_iids)); memset(&tm_iids, 0, sizeof(tm_iids)); p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); /* Reset all ILT blocks at the beginning of ILT computing in order * to prevent memory allocation for irrelevant blocks afterwards. 
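 * (qed_cxt_ilt_blk_reset() zeroes total_size for every PF and VF block
 * of every ILT client.)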
*/ qed_cxt_ilt_blk_reset(p_hwfn); DP_VERBOSE(p_hwfn, QED_MSG_ILT, "hwfn [%d] - Set context manager starting line to be 0x%08x\n", p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); /* CDUC */ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]); curr_line = p_mngr->pf_start_line; /* CDUC PF */ p_cli->pf_total_lines = 0; /* get the counters for the CDUC and QM clients */ qed_cxt_cdu_iids(p_mngr, &cdu_iids); p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]); total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, CONN_CXT_SIZE(p_hwfn)); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); p_cli->pf_total_lines = curr_line - p_blk->start_line; p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn, ILT_CLI_CDUC); /* CDUC VF */ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]); total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, CONN_CXT_SIZE(p_hwfn)); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); p_cli->vf_total_lines = curr_line - p_blk->start_line; for (i = 1; i < p_mngr->vf_count; i++) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); /* CDUT PF */ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]); p_cli->first.val = curr_line; /* first the 'working' task memory */ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { p_seg = qed_cxt_tid_seg_info(p_hwfn, i); if (!p_seg || p_seg->count == 0) continue; p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]); total = p_seg->count * p_mngr->task_type_size[p_seg->type]; qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, p_mngr->task_type_size[p_seg->type]); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUT); } /* next the 'init' task memory (forced load memory) */ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { p_seg = qed_cxt_tid_seg_info(p_hwfn, i); if (!p_seg || p_seg->count == 0) continue; p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]); if (!p_seg->has_fl_mem) { /* The segment is active (total size pf 'working' * memory is > 0) but has no FL (forced-load, Init) * memory. Thus: * * 1. The total-size in the corrsponding FL block of * the ILT client is set to 0 - No ILT line are * provisioned and no ILT memory allocated. * * 2. The start-line of said block is set to the * start line of the matching working memory * block in the ILT client. This is later used to * configure the CDU segment offset registers and * results in an FL command for TIDs of this * segement behaves as regular load commands * (loading TIDs from the working memory). 
*/ line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line; qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); continue; } total = p_seg->count * p_mngr->task_type_size[p_seg->type]; qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, p_mngr->task_type_size[p_seg->type]); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUT); } p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line; /* CDUT VF */ p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF); if (p_seg && p_seg->count) { /* Stricly speaking we need to iterate over all VF * task segment types, but a VF has only 1 segment */ /* 'working' memory */ total = p_seg->count * p_mngr->task_type_size[p_seg->type]; p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, p_mngr->task_type_size[p_seg->type]); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUT); /* 'init' memory */ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]); if (!p_seg->has_fl_mem) { /* see comment above */ line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line; qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); } else { task_size = p_mngr->task_type_size[p_seg->type]; qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, task_size); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUT); } p_cli->vf_total_lines = curr_line - p_cli->vf_blks[0].start_line; /* Now for the rest of the VFs */ for (i = 1; i < p_mngr->vf_count; i++) { p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUT); p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUT); } } /* QM */ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]); p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); qed_cxt_qm_iids(p_hwfn, &qm_iids); total = qed_qm_pf_mem_size(qm_iids.cids, qm_iids.vf_cids, qm_iids.tids, p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs); DP_VERBOSE(p_hwfn, QED_MSG_ILT, "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", qm_iids.cids, qm_iids.vf_cids, qm_iids.tids, p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000, QM_PQ_ELEMENT_SIZE); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); p_cli->pf_total_lines = curr_line - p_blk->start_line; /* SRC */ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]); qed_cxt_src_iids(p_mngr, &src_iids); /* Both the PF and VFs searcher connections are stored in the per PF * database. Thus sum the PF searcher cids and all the VFs searcher * cids. 
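 * The sum is then raised to at least SRC_MIN_NUM_ELEMS and rounded up to
 * a power of two before the ILT block is sized.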
*/ total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; if (total) { u32 local_max = max_t(u32, total, SRC_MIN_NUM_ELEMS); total = roundup_pow_of_two(local_max); p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * sizeof(struct src_ent), sizeof(struct src_ent)); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_SRC); p_cli->pf_total_lines = curr_line - p_blk->start_line; } /* TM PF */ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]); qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); total = tm_iids.pf_cids + tm_iids.pf_tids_total; if (total) { p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * TM_ELEM_SIZE, TM_ELEM_SIZE); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_TM); p_cli->pf_total_lines = curr_line - p_blk->start_line; } /* TM VF */ total = tm_iids.per_vf_cids + tm_iids.per_vf_tids; if (total) { p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * TM_ELEM_SIZE, TM_ELEM_SIZE); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_TM); p_cli->vf_total_lines = curr_line - p_blk->start_line; for (i = 1; i < p_mngr->vf_count; i++) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_TM); } /* TSDM (SRQ CONTEXT) */ total = qed_cxt_get_total_srq_count(p_hwfn); if (total) { p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]); p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * SRQ_CXT_SIZE, SRQ_CXT_SIZE); qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_TSDM); p_cli->pf_total_lines = curr_line - p_blk->start_line; } *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line; if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > RESC_NUM(p_hwfn, QED_ILT)) return -EINVAL; return 0; } u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines) { struct qed_ilt_client_cfg *p_cli; u32 excess_lines, available_lines; struct qed_cxt_mngr *p_mngr; u32 ilt_page_size, elem_size; struct qed_tid_seg *p_seg; int i; available_lines = RESC_NUM(p_hwfn, QED_ILT); excess_lines = used_lines - available_lines; if (!excess_lines) return 0; if (!QED_IS_RDMA_PERSONALITY(p_hwfn)) return 0; p_mngr = p_hwfn->p_cxt_mngr; p_cli = &p_mngr->clients[ILT_CLI_CDUT]; ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { p_seg = qed_cxt_tid_seg_info(p_hwfn, i); if (!p_seg || p_seg->count == 0) continue; elem_size = p_mngr->task_type_size[p_seg->type]; if (!elem_size) continue; return (ilt_page_size / elem_size) * excess_lines; } DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n"); return 0; } static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn) { struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2; u32 i; if (!p_t2 || !p_t2->dma_mem) return; for (i = 0; i < p_t2->num_pages; i++) if (p_t2->dma_mem[i].virt_addr) dma_free_coherent(&p_hwfn->cdev->pdev->dev, p_t2->dma_mem[i].size, p_t2->dma_mem[i].virt_addr, p_t2->dma_mem[i].phys_addr); kfree(p_t2->dma_mem); p_t2->dma_mem = NULL; } static int qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn, struct qed_src_t2 *p_t2, u32 total_size, u32 page_size) { void **p_virt; u32 size, i; if (!p_t2 || !p_t2->dma_mem) return -EINVAL; for (i = 0; i < p_t2->num_pages; i++) { size = min_t(u32, total_size, page_size); p_virt = &p_t2->dma_mem[i].virt_addr; *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, &p_t2->dma_mem[i].phys_addr, GFP_KERNEL); if 
(!p_t2->dma_mem[i].virt_addr) return -ENOMEM; memset(*p_virt, 0, size); p_t2->dma_mem[i].size = size; total_size -= size; } return 0; } static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 conn_num, total_size, ent_per_page, psz, i; struct phys_mem_desc *p_t2_last_page; struct qed_ilt_client_cfg *p_src; struct qed_src_iids src_iids; struct qed_src_t2 *p_t2; int rc; memset(&src_iids, 0, sizeof(src_iids)); /* if the SRC ILT client is inactive - there are no connection * requiring the searcer, leave. */ p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC]; if (!p_src->active) return 0; qed_cxt_src_iids(p_mngr, &src_iids); conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; total_size = conn_num * sizeof(struct src_ent); /* use the same page size as the SRC ILT client */ psz = ILT_PAGE_IN_BYTES(p_src->p_size.val); p_t2 = &p_mngr->src_t2; p_t2->num_pages = DIV_ROUND_UP(total_size, psz); /* allocate t2 */ p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc), GFP_KERNEL); if (!p_t2->dma_mem) { DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n"); rc = -ENOMEM; goto t2_fail; } rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz); if (rc) goto t2_fail; /* Set the t2 pointers */ /* entries per page - must be a power of two */ ent_per_page = psz / sizeof(struct src_ent); p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr; p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page]; p_t2->last_free = (u64)p_t2_last_page->phys_addr + ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent); for (i = 0; i < p_t2->num_pages; i++) { u32 ent_num = min_t(u32, ent_per_page, conn_num); struct src_ent *entries = p_t2->dma_mem[i].virt_addr; u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val; u32 j; for (j = 0; j < ent_num - 1; j++) { val = p_ent_phys + (j + 1) * sizeof(struct src_ent); entries[j].next = cpu_to_be64(val); } if (i < p_t2->num_pages - 1) val = (u64)p_t2->dma_mem[i + 1].phys_addr; else val = 0; entries[j].next = cpu_to_be64(val); conn_num -= ent_num; } return 0; t2_fail: qed_cxt_src_t2_free(p_hwfn); return rc; } #define for_each_ilt_valid_client(pos, clients) \ for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \ if (!clients[pos].active) { \ continue; \ } else \ /* Total number of ILT lines used by this PF */ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients) { u32 size = 0; u32 i; for_each_ilt_valid_client(i, ilt_clients) size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1); return size; } static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients; struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 ilt_size, i; ilt_size = qed_cxt_ilt_shadow_size(p_cli); for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i]; if (p_dma->virt_addr) dma_free_coherent(&p_hwfn->cdev->pdev->dev, p_dma->size, p_dma->virt_addr, p_dma->phys_addr); p_dma->virt_addr = NULL; } kfree(p_mngr->ilt_shadow); } static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, struct qed_ilt_cli_blk *p_blk, enum ilt_clients ilt_client, u32 start_line_offset) { struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; u32 lines, line, sz_left, lines_to_skip = 0; /* Special handling for RoCE that supports dynamic allocation */ if (QED_IS_RDMA_PERSONALITY(p_hwfn) && ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM)) return 0; lines_to_skip = p_blk->dynamic_line_cnt; if 
(!p_blk->total_size) return 0; sz_left = p_blk->total_size; lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip; line = p_blk->start_line + start_line_offset - p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip; for (; lines; lines--) { dma_addr_t p_phys; void *p_virt; u32 size; size = min_t(u32, sz_left, p_blk->real_size_in_page); p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, &p_phys, GFP_KERNEL); if (!p_virt) return -ENOMEM; ilt_shadow[line].phys_addr = p_phys; ilt_shadow[line].virt_addr = p_virt; ilt_shadow[line].size = size; DP_VERBOSE(p_hwfn, QED_MSG_ILT, "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n", line, (u64)p_phys, p_virt, size); sz_left -= size; line++; } return 0; } static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *clients = p_mngr->clients; struct qed_ilt_cli_blk *p_blk; u32 size, i, j, k; int rc; size = qed_cxt_ilt_shadow_size(clients); p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc), GFP_KERNEL); if (!p_mngr->ilt_shadow) { rc = -ENOMEM; goto ilt_shadow_fail; } DP_VERBOSE(p_hwfn, QED_MSG_ILT, "Allocated 0x%x bytes for ilt shadow\n", (u32)(size * sizeof(struct phys_mem_desc))); for_each_ilt_valid_client(i, clients) { for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { p_blk = &clients[i].pf_blks[j]; rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0); if (rc) goto ilt_shadow_fail; } for (k = 0; k < p_mngr->vf_count; k++) { for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) { u32 lines = clients[i].vf_total_lines * k; p_blk = &clients[i].vf_blks[j]; rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines); if (rc) goto ilt_shadow_fail; } } } return 0; ilt_shadow_fail: qed_ilt_shadow_free(p_hwfn); return rc; } static void qed_cid_map_free(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 type, vf; for (type = 0; type < MAX_CONN_TYPES; type++) { bitmap_free(p_mngr->acquired[type].cid_map); p_mngr->acquired[type].max_count = 0; p_mngr->acquired[type].start_cid = 0; for (vf = 0; vf < MAX_NUM_VFS; vf++) { bitmap_free(p_mngr->acquired_vf[type][vf].cid_map); p_mngr->acquired_vf[type][vf].max_count = 0; p_mngr->acquired_vf[type][vf].start_cid = 0; } } } static int qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn, u32 type, u32 cid_start, u32 cid_count, struct qed_cid_acquired_map *p_map) { if (!cid_count) return 0; p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL); if (!p_map->cid_map) return -ENOMEM; p_map->max_count = cid_count; p_map->start_cid = cid_start; DP_VERBOSE(p_hwfn, QED_MSG_CXT, "Type %08x start: %08x count %08x\n", type, p_map->start_cid, p_map->max_count); return 0; } static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 start_cid = 0, vf_start_cid = 0; u32 type, vf; for (type = 0; type < MAX_CONN_TYPES; type++) { struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type]; struct qed_cid_acquired_map *p_map; /* Handle PF maps */ p_map = &p_mngr->acquired[type]; if (qed_cid_map_alloc_single(p_hwfn, type, start_cid, p_cfg->cid_count, p_map)) goto cid_map_fail; /* Handle VF maps */ for (vf = 0; vf < MAX_NUM_VFS; vf++) { p_map = &p_mngr->acquired_vf[type][vf]; if (qed_cid_map_alloc_single(p_hwfn, type, vf_start_cid, p_cfg->cids_per_vf, p_map)) goto cid_map_fail; } start_cid += p_cfg->cid_count; vf_start_cid += p_cfg->cids_per_vf; } return 0; cid_map_fail: qed_cid_map_free(p_hwfn); return -ENOMEM; } int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg 
*clients; struct qed_cxt_mngr *p_mngr; u32 i; p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL); if (!p_mngr) return -ENOMEM; /* Initialize ILT client registers */ clients = p_mngr->clients; clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT); clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT); clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE); clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT); clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT); clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE); clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT); clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT); clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE); clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT); clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT); clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE); /* default ILT page size for all clients is 64K */ for (i = 0; i < MAX_ILT_CLIENTS; i++) p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn); /* Initialize task sizes */ p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn); p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn); if (p_hwfn->cdev->p_iov_info) { p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs; p_mngr->first_vf_in_pf = p_hwfn->cdev->p_iov_info->first_vf_in_pf; } /* Initialize the dynamic ILT allocation mutex */ mutex_init(&p_mngr->mutex); /* Set the cxt mangr pointer priori to further allocations */ p_hwfn->p_cxt_mngr = p_mngr; return 0; } int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn) { int rc; /* Allocate the ILT shadow table */ rc = qed_ilt_shadow_alloc(p_hwfn); if (rc) goto tables_alloc_fail; /* Allocate the T2 table */ rc = qed_cxt_src_t2_alloc(p_hwfn); if (rc) goto tables_alloc_fail; /* Allocate and initialize the acquired cids bitmaps */ rc = qed_cid_map_alloc(p_hwfn); if (rc) goto tables_alloc_fail; return 0; tables_alloc_fail: qed_cxt_mngr_free(p_hwfn); return rc; } void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn) { if (!p_hwfn->p_cxt_mngr) return; qed_cid_map_free(p_hwfn); qed_cxt_src_t2_free(p_hwfn); qed_ilt_shadow_free(p_hwfn); kfree(p_hwfn->p_cxt_mngr); p_hwfn->p_cxt_mngr = NULL; } void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cid_acquired_map *p_map; struct qed_conn_type_cfg *p_cfg; int type; /* Reset acquired cids */ for (type = 0; type < MAX_CONN_TYPES; type++) { u32 vf; p_cfg = &p_mngr->conn_cfg[type]; if (p_cfg->cid_count) { p_map = &p_mngr->acquired[type]; bitmap_zero(p_map->cid_map, p_map->max_count); } if (!p_cfg->cids_per_vf) continue; for (vf = 0; vf < MAX_NUM_VFS; vf++) { p_map = &p_mngr->acquired_vf[type][vf]; bitmap_zero(p_map->cid_map, p_map->max_count); } } } /* CDU Common */ #define CDUC_CXT_SIZE_SHIFT \ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT #define CDUC_CXT_SIZE_MASK \ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT) #define CDUC_BLOCK_WASTE_SHIFT \ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT #define CDUC_BLOCK_WASTE_MASK \ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT) #define 
CDUC_NCIB_SHIFT \ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT #define CDUC_NCIB_MASK \ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT) #define CDUT_TYPE0_CXT_SIZE_SHIFT \ CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT #define CDUT_TYPE0_CXT_SIZE_MASK \ (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \ CDUT_TYPE0_CXT_SIZE_SHIFT) #define CDUT_TYPE0_BLOCK_WASTE_SHIFT \ CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT #define CDUT_TYPE0_BLOCK_WASTE_MASK \ (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \ CDUT_TYPE0_BLOCK_WASTE_SHIFT) #define CDUT_TYPE0_NCIB_SHIFT \ CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT #define CDUT_TYPE0_NCIB_MASK \ (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \ CDUT_TYPE0_NCIB_SHIFT) #define CDUT_TYPE1_CXT_SIZE_SHIFT \ CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT #define CDUT_TYPE1_CXT_SIZE_MASK \ (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \ CDUT_TYPE1_CXT_SIZE_SHIFT) #define CDUT_TYPE1_BLOCK_WASTE_SHIFT \ CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT #define CDUT_TYPE1_BLOCK_WASTE_MASK \ (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \ CDUT_TYPE1_BLOCK_WASTE_SHIFT) #define CDUT_TYPE1_NCIB_SHIFT \ CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT #define CDUT_TYPE1_NCIB_MASK \ (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \ CDUT_TYPE1_NCIB_SHIFT) static void qed_cdu_init_common(struct qed_hwfn *p_hwfn) { u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; /* CDUC - connection configuration */ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; cxt_size = CONN_CXT_SIZE(p_hwfn); elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size); SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); /* CDUT - type-0 tasks configuration */ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val; cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0]; elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; /* cxt size and block-waste are multipes of 8 */ cdu_params = 0; SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3)); SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3)); SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page); STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params); /* CDUT - type-1 tasks configuration */ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1]; elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; /* cxt size and block-waste are multipes of 8 */ cdu_params = 0; SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3)); SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3)); SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page); STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params); } /* CDU PF */ #define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT #define CDU_SEG_REG_TYPE_MASK 0x1 #define CDU_SEG_REG_OFFSET_SHIFT 0 #define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *p_cli; struct qed_tid_seg *p_seg; u32 cdu_seg_params, offset; int i; static const u32 rt_type_offset_arr[] = { CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET, CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET, CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET, 
CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET }; static const u32 rt_type_offset_fl_arr[] = { CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET, CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET, CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET, CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET }; p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; /* There are initializations only for CDUT during pf Phase */ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { /* Segment 0 */ p_seg = qed_cxt_tid_seg_info(p_hwfn, i); if (!p_seg) continue; /* Note: start_line is already adjusted for the CDU * segment register granularity, so we just need to * divide. Adjustment is implicit as we assume ILT * Page size is larger than 32K! */ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line - p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; cdu_seg_params = 0; SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params); offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line - p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; cdu_seg_params = 0; SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params); } } void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool is_pf_loading) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_pf_rt_init_params params; struct qed_qm_iids iids; memset(&iids, 0, sizeof(iids)); qed_cxt_qm_iids(p_hwfn, &iids); memset(&params, 0, sizeof(params)); params.port_id = p_hwfn->port_id; params.pf_id = p_hwfn->rel_pf_id; params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params.is_pf_loading = is_pf_loading; params.num_pf_cids = iids.cids; params.num_vf_cids = iids.vf_cids; params.num_tids = iids.tids; params.start_pq = qm_info->start_pq; params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs; params.num_vf_pqs = qm_info->num_vf_pqs; params.start_vport = qm_info->start_vport; params.num_vports = qm_info->num_vports; params.pf_wfq = qm_info->pf_wfq; params.pf_rl = qm_info->pf_rl; params.pq_params = qm_info->qm_pq_params; params.vport_params = qm_info->qm_vport_params; qed_qm_pf_rt_init(p_hwfn, p_ptt, &params); } /* CM PF */ static void qed_cm_init_pf(struct qed_hwfn *p_hwfn) { /* XCM pure-LB queue */ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB)); } /* DQ PF */ static void qed_dq_init_pf(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0; dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid); dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid); dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid); 
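/* Editor's note - illustrative comment, not part of the original driver
 * source: each DORQ_REG_*_MAX_ICID_n store above and below holds the
 * *running* total of (cid_count >> DQ_RANGE_SHIFT) accumulated over
 * connection types 0..n, so the doorbell block sees a cumulative ICID
 * upper bound per type. For example, assuming DQ_RANGE_SHIFT were 4 and
 * the first three types had 0x400, 0x200 and 0x100 CIDs, the stored
 * values would be 0x40, 0x60 and 0x70 respectively.
 */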
dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid); dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid); dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid); /* Connection types 6 & 7 are not in use, yet they must be configured * as the highest possible connection. Not configuring them means the * defaults will be used, and with a large number of cids a bug may * occur, if the defaults will be smaller than dq_pf_max_cid / * dq_vf_max_cid. */ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid); STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid); STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid); } static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *ilt_clients; int i; ilt_clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client(i, ilt_clients) { STORE_RT_REG(p_hwfn, ilt_clients[i].first.reg, ilt_clients[i].first.val); STORE_RT_REG(p_hwfn, ilt_clients[i].last.reg, ilt_clients[i].last.val); STORE_RT_REG(p_hwfn, ilt_clients[i].p_size.reg, ilt_clients[i].p_size.val); } } static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *p_cli; u32 blk_factor; /* For simplicty we set the 'block' to be an ILT page */ if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; STORE_RT_REG(p_hwfn, PSWRQ2_REG_VF_BASE_RT_OFFSET, p_iov->first_vf_in_pf); STORE_RT_REG(p_hwfn, PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET, p_iov->first_vf_in_pf + p_iov->total_vfs); } p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); if (p_cli->active) { STORE_RT_REG(p_hwfn, PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET, blk_factor); STORE_RT_REG(p_hwfn, PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET, p_cli->pf_total_lines); STORE_RT_REG(p_hwfn, PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, p_cli->vf_total_lines); } p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); if (p_cli->active) { STORE_RT_REG(p_hwfn, PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET, blk_factor); STORE_RT_REG(p_hwfn, PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET, p_cli->pf_total_lines); STORE_RT_REG(p_hwfn, PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET, p_cli->vf_total_lines); } p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM]; blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); if (p_cli->active) { STORE_RT_REG(p_hwfn, PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor); STORE_RT_REG(p_hwfn, PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET, p_cli->pf_total_lines); STORE_RT_REG(p_hwfn, PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET, p_cli->vf_total_lines); } } /* ILT (PSWRQ2) PF */ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *clients; struct 
qed_cxt_mngr *p_mngr; struct phys_mem_desc *p_shdw; u32 line, rt_offst, i; qed_ilt_bounds_init(p_hwfn); qed_ilt_vf_bounds_init(p_hwfn); p_mngr = p_hwfn->p_cxt_mngr; p_shdw = p_mngr->ilt_shadow; clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client(i, clients) { /** Client's 1st val and RT array are absolute, ILT shadows' * lines are relative. */ line = clients[i].first.val - p_mngr->pf_start_line; rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET + clients[i].first.val * ILT_ENTRY_IN_REGS; for (; line <= clients[i].last.val - p_mngr->pf_start_line; line++, rt_offst += ILT_ENTRY_IN_REGS) { u64 ilt_hw_entry = 0; /** p_virt could be NULL incase of dynamic * allocation */ if (p_shdw[line].virt_addr) { SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, (p_shdw[line].phys_addr >> 12)); DP_VERBOSE(p_hwfn, QED_MSG_ILT, "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n", rt_offst, line, i, (u64)(p_shdw[line].phys_addr >> 12)); } STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry); } } } /* SRC (Searcher) PF */ static void qed_src_init_pf(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 rounded_conn_num, conn_num, conn_max; struct qed_src_iids src_iids; memset(&src_iids, 0, sizeof(src_iids)); qed_cxt_src_iids(p_mngr, &src_iids); conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; if (!conn_num) return; conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS); rounded_conn_num = roundup_pow_of_two(conn_max); STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num); STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET, ilog2(rounded_conn_num)); STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET, p_hwfn->p_cxt_mngr->src_t2.first_free); STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET, p_hwfn->p_cxt_mngr->src_t2.last_free); } /* Timers PF */ #define TM_CFG_NUM_IDS_SHIFT 0 #define TM_CFG_NUM_IDS_MASK 0xFFFFULL #define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16 #define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL #define TM_CFG_PARENT_PF_SHIFT 25 #define TM_CFG_PARENT_PF_MASK 0x7ULL #define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30 #define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL #define TM_CFG_TID_OFFSET_SHIFT 30 #define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL #define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49 #define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 active_seg_mask = 0, tm_offset, rt_reg; struct qed_tm_iids tm_iids; u64 cfg_word; u8 i; memset(&tm_iids, 0, sizeof(tm_iids)); qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); /* @@@TBD No pre-scan for now */ /* Note: We assume consecutive VFs for a PF */ for (i = 0; i < p_mngr->vf_count; i++) { cfg_word = 0; SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids); SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + (sizeof(cfg_word) / sizeof(u32)) * (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i); STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); } cfg_word = 0; SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids); SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + (sizeof(cfg_word) / sizeof(u32)) * (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id); 
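/* Editor's note - illustrative comment, not from the original source:
 * the timers connection-config array is indexed per function. VF entries
 * occupy slots 0..NUM_OF_VFS()-1 (by absolute VF id, written in the loop
 * above), and the PF entries follow at slot NUM_OF_VFS() + rel_pf_id,
 * which is what the rt_reg computed just above encodes. Each slot spans
 * sizeof(cfg_word) / sizeof(u32) == 2 runtime registers.
 */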
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); /* enale scan */ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET, tm_iids.pf_cids ? 0x1 : 0x0); /* @@@TBD how to enable the scan for the VFs */ tm_offset = tm_iids.per_vf_cids; /* Note: We assume consecutive VFs for a PF */ for (i = 0; i < p_mngr->vf_count; i++) { cfg_word = 0; SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids); SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0); rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + (sizeof(cfg_word) / sizeof(u32)) * (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i); STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); } tm_offset = tm_iids.pf_cids; for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { cfg_word = 0; SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]); SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0); rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + (sizeof(cfg_word) / sizeof(u32)) * (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i); STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0); tm_offset += tm_iids.pf_tids[i]; } if (QED_IS_RDMA_PERSONALITY(p_hwfn)) active_seg_mask = 0; STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask); /* @@@TBD how to enable the scan for the VFs */ } static void qed_prs_init_common(struct qed_hwfn *p_hwfn) { if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) && p_hwfn->pf_params.fcoe_pf_params.is_target) STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0); } static void qed_prs_init_pf(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_conn_type_cfg *p_fcoe; struct qed_tid_seg *p_tid; p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE]; /* If FCoE is active set the MAX OX_ID (tid) in the Parser */ if (!p_fcoe->cid_count) return; p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG]; if (p_hwfn->pf_params.fcoe_pf_params.is_target) { STORE_RT_REG_AGG(p_hwfn, PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET, p_tid->count); } else { STORE_RT_REG_AGG(p_hwfn, PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET, p_tid->count); } } void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) { qed_cdu_init_common(p_hwfn); qed_prs_init_common(p_hwfn); } void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { qed_qm_init_pf(p_hwfn, p_ptt, true); qed_cm_init_pf(p_hwfn); qed_dq_init_pf(p_hwfn); qed_cdu_init_pf(p_hwfn); qed_ilt_init_pf(p_hwfn); qed_src_init_pf(p_hwfn); qed_tm_init_pf(p_hwfn); qed_prs_init_pf(p_hwfn); } int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid, u8 vfid) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cid_acquired_map *p_map; u32 rel_cid; if (type >= MAX_CONN_TYPES) { DP_NOTICE(p_hwfn, "Invalid protocol type %d", type); return -EINVAL; } if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) { DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid); return -EINVAL; } /* Determine the right map to take this CID from */ if (vfid == QED_CXT_PF_CID) p_map = &p_mngr->acquired[type]; else p_map = &p_mngr->acquired_vf[type][vfid]; if (!p_map->cid_map) { DP_NOTICE(p_hwfn, "Invalid protocol type %d", type); return -EINVAL; } rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count); if (rel_cid >= 
p_map->max_count) { DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type); return -EINVAL; } __set_bit(rel_cid, p_map->cid_map); *p_cid = rel_cid + p_map->start_cid; DP_VERBOSE(p_hwfn, QED_MSG_CXT, "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n", *p_cid, rel_cid, vfid, type); return 0; } int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid) { return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID); } static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid, enum protocol_type *p_type, struct qed_cid_acquired_map **pp_map) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 rel_cid; /* Iterate over protocols and find matching cid range */ for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) { if (vfid == QED_CXT_PF_CID) *pp_map = &p_mngr->acquired[*p_type]; else *pp_map = &p_mngr->acquired_vf[*p_type][vfid]; if (!((*pp_map)->cid_map)) continue; if (cid >= (*pp_map)->start_cid && cid < (*pp_map)->start_cid + (*pp_map)->max_count) break; } if (*p_type == MAX_CONN_TYPES) { DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid); goto fail; } rel_cid = cid - (*pp_map)->start_cid; if (!test_bit(rel_cid, (*pp_map)->cid_map)) { DP_NOTICE(p_hwfn, "CID %d [vifd %02x] not acquired", cid, vfid); goto fail; } return true; fail: *p_type = MAX_CONN_TYPES; *pp_map = NULL; return false; } void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid) { struct qed_cid_acquired_map *p_map = NULL; enum protocol_type type; bool b_acquired; u32 rel_cid; if (vfid != QED_CXT_PF_CID && vfid > MAX_NUM_VFS) { DP_NOTICE(p_hwfn, "Trying to return incorrect CID belonging to VF %02x\n", vfid); return; } /* Test acquired and find matching per-protocol map */ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid, &type, &p_map); if (!b_acquired) return; rel_cid = cid - p_map->start_cid; clear_bit(rel_cid, p_map->cid_map); DP_VERBOSE(p_hwfn, QED_MSG_CXT, "Released CID 0x%08x [rel. 
%08x] vfid %02x type %d\n", cid, rel_cid, vfid, type); } void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid) { _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID); } int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_cid_acquired_map *p_map = NULL; u32 conn_cxt_size, hw_p_size, cxts_per_p, line; enum protocol_type type; bool b_acquired; /* Test acquired and find matching per-protocol map */ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, QED_CXT_PF_CID, &type, &p_map); if (!b_acquired) return -EINVAL; /* set the protocl type */ p_info->type = type; /* compute context virtual pointer */ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; conn_cxt_size = CONN_CXT_SIZE(p_hwfn); cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size; line = p_info->iid / cxts_per_p; /* Make sure context is allocated (dynamic allocation) */ if (!p_mngr->ilt_shadow[line].virt_addr) return -EINVAL; p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr + p_info->iid % cxts_per_p * conn_cxt_size; DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT), "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n", p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid); return 0; } static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, struct qed_rdma_pf_params *p_params, u32 num_tasks) { u32 num_cons, num_qps; enum protocol_type proto; if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { DP_VERBOSE(p_hwfn, QED_MSG_SP, "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE; } switch (p_hwfn->hw_info.personality) { case QED_PCI_ETH_IWARP: /* Each QP requires one connection */ num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps); proto = PROTOCOLID_IWARP; break; case QED_PCI_ETH_ROCE: num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps); num_cons = num_qps * 2; /* each QP requires two connections */ proto = PROTOCOLID_ROCE; break; default: return; } if (num_cons && num_tasks) { u32 num_srqs, num_xrc_srqs; qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0); /* Deliberatly passing ROCE for tasks id. This is because * iWARP / RoCE share the task id. 
*/ qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE, QED_CXT_ROCE_TID_SEG, 1, num_tasks, false); num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); /* XRC SRQs populate a single ILT page */ num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn); qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs); } else { DP_INFO(p_hwfn->cdev, "RDMA personality used without setting params!\n"); } } int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) { /* Set the number of required CORE connections */ u32 core_cids = 1; /* SPQ */ if (p_hwfn->using_ll2) core_cids += 4; qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); switch (p_hwfn->hw_info.personality) { case QED_PCI_ETH_RDMA: case QED_PCI_ETH_IWARP: case QED_PCI_ETH_ROCE: { qed_rdma_set_pf_params(p_hwfn, &p_hwfn-> pf_params.rdma_pf_params, rdma_tasks); /* no need for break since RoCE coexist with Ethernet */ } fallthrough; case QED_PCI_ETH: { struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; if (!p_params->num_vf_cons) p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT; qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, p_params->num_cons, p_params->num_vf_cons); p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters; break; } case QED_PCI_FCOE: { struct qed_fcoe_pf_params *p_params; p_params = &p_hwfn->pf_params.fcoe_pf_params; if (p_params->num_cons && p_params->num_tasks) { qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_FCOE, p_params->num_cons, 0); qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE, QED_CXT_FCOE_TID_SEG, 0, p_params->num_tasks, true); } else { DP_INFO(p_hwfn->cdev, "Fcoe personality used without setting params!\n"); } break; } case QED_PCI_ISCSI: { struct qed_iscsi_pf_params *p_params; p_params = &p_hwfn->pf_params.iscsi_pf_params; if (p_params->num_cons && p_params->num_tasks) { qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_TCP_ULP, p_params->num_cons, 0); qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_TCP_ULP, QED_CXT_TCP_ULP_TID_SEG, 0, p_params->num_tasks, true); } else { DP_INFO(p_hwfn->cdev, "Iscsi personality used without setting params!\n"); } break; } case QED_PCI_NVMETCP: { struct qed_nvmetcp_pf_params *p_params; p_params = &p_hwfn->pf_params.nvmetcp_pf_params; if (p_params->num_cons && p_params->num_tasks) { qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_TCP_ULP, p_params->num_cons, 0); qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_TCP_ULP, QED_CXT_TCP_ULP_TID_SEG, 0, p_params->num_tasks, true); } else { DP_INFO(p_hwfn->cdev, "NvmeTCP personality used without setting params!\n"); } break; } default: return -EINVAL; } return 0; } int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, struct qed_tid_mem *p_info) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 proto, seg, total_lines, i, shadow_line; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_fl_seg; struct qed_tid_seg *p_seg_info; /* Verify the personality */ switch (p_hwfn->hw_info.personality) { case QED_PCI_FCOE: proto = PROTOCOLID_FCOE; seg = QED_CXT_FCOE_TID_SEG; break; case QED_PCI_ISCSI: case QED_PCI_NVMETCP: proto = PROTOCOLID_TCP_ULP; seg = QED_CXT_TCP_ULP_TID_SEG; break; default: return -EINVAL; } p_cli = &p_mngr->clients[ILT_CLI_CDUT]; if (!p_cli->active) return -EINVAL; p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; if (!p_seg_info->has_fl_mem) return -EINVAL; p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; total_lines = DIV_ROUND_UP(p_fl_seg->total_size, p_fl_seg->real_size_in_page); for (i = 0; i < total_lines; i++) { shadow_line = i + 
p_fl_seg->start_line - p_hwfn->p_cxt_mngr->pf_start_line; p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr; } p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) - p_fl_seg->real_size_in_page; p_info->tid_size = p_mngr->task_type_size[p_seg_info->type]; p_info->num_tids_per_block = p_fl_seg->real_size_in_page / p_info->tid_size; return 0; } /* This function is very RoCE oriented, if another protocol in the future * will want this feature we'll need to modify the function to be more generic */ int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, enum qed_cxt_elem_type elem_type, u32 iid) { u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line; struct tdif_task_context *tdif_context; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; struct qed_ptt *p_ptt; dma_addr_t p_phys; u64 ilt_hw_entry; void *p_virt; u32 flags1; int rc = 0; switch (elem_type) { case QED_ELEM_CXT: p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; elem_size = CONN_CXT_SIZE(p_hwfn); p_blk = &p_cli->pf_blks[CDUC_BLK]; break; case QED_ELEM_SRQ: /* The first ILT page is not used for regular SRQs. Skip it. */ iid += p_hwfn->p_cxt_mngr->xrc_srq_count; p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; elem_size = SRQ_CXT_SIZE; p_blk = &p_cli->pf_blks[SRQ_BLK]; break; case QED_ELEM_XRC_SRQ: p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; elem_size = XRC_SRQ_CXT_SIZE; p_blk = &p_cli->pf_blks[SRQ_BLK]; break; case QED_ELEM_TASK: p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)]; break; default: DP_NOTICE(p_hwfn, "-EOPNOTSUPP elem type = %d", elem_type); return -EOPNOTSUPP; } /* Calculate line in ilt */ hw_p_size = p_cli->p_size.val; elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; line = p_blk->start_line + (iid / elems_per_p); shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line; /* If line is already allocated, do nothing, otherwise allocate it and * write it to the PSWRQ2 registers. * This section can be run in parallel from different contexts and thus * a mutex protection is needed. */ mutex_lock(&p_hwfn->p_cxt_mngr->mutex); if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr) goto out0; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_NOTICE(p_hwfn, "QED_TIME_OUT on ptt acquire - dynamic allocation"); rc = -EBUSY; goto out0; } p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, p_blk->real_size_in_page, &p_phys, GFP_KERNEL); if (!p_virt) { rc = -ENOMEM; goto out1; } /* configuration of refTagMask to 0xF is required for RoCE DIF MR only, * to compensate for a HW bug, but it is configured even if DIF is not * enabled. This is harmless and allows us to avoid a dedicated API. We * configure the field for all of the contexts on the newly allocated * page. 
*/ if (elem_type == QED_ELEM_TASK) { u32 elem_i; u8 *elem_start = (u8 *)p_virt; union type1_task_context *elem; for (elem_i = 0; elem_i < elems_per_p; elem_i++) { elem = (union type1_task_context *)elem_start; tdif_context = &elem->roce_ctx.tdif_context; flags1 = le32_to_cpu(tdif_context->flags1); SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf); tdif_context->flags1 = cpu_to_le32(flags1); elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); } } p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt; p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys; p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size = p_blk->real_size_in_page; /* compute absolute offset */ reg_offset = PSWRQ2_REG_ILT_MEMORY + (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS); ilt_hw_entry = 0; SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >> 12)); /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */ qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry, reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), NULL); if (elem_type == QED_ELEM_CXT) { u32 last_cid_allocated = (1 + (iid / elems_per_p)) * elems_per_p; /* Update the relevant register in the parser */ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, last_cid_allocated - 1); if (!p_hwfn->b_rdma_enabled_in_prs) { /* Enable RDMA search */ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); p_hwfn->b_rdma_enabled_in_prs = true; } } out1: qed_ptt_release(p_hwfn, p_ptt); out0: mutex_unlock(&p_hwfn->p_cxt_mngr->mutex); return rc; } /* This function is very RoCE oriented, if another protocol in the future * will want this feature we'll need to modify the function to be more generic */ static int qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn, enum qed_cxt_elem_type elem_type, u32 start_iid, u32 count) { u32 start_line, end_line, shadow_start_line, shadow_end_line; u32 reg_offset, elem_size, hw_p_size, elems_per_p; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; u32 end_iid = start_iid + count; struct qed_ptt *p_ptt; u64 ilt_hw_entry = 0; u32 i; switch (elem_type) { case QED_ELEM_CXT: p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; elem_size = CONN_CXT_SIZE(p_hwfn); p_blk = &p_cli->pf_blks[CDUC_BLK]; break; case QED_ELEM_SRQ: p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; elem_size = SRQ_CXT_SIZE; p_blk = &p_cli->pf_blks[SRQ_BLK]; break; case QED_ELEM_XRC_SRQ: p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; elem_size = XRC_SRQ_CXT_SIZE; p_blk = &p_cli->pf_blks[SRQ_BLK]; break; case QED_ELEM_TASK: p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)]; break; default: DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type); return -EINVAL; } /* Calculate line in ilt */ hw_p_size = p_cli->p_size.val; elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; start_line = p_blk->start_line + (start_iid / elems_per_p); end_line = p_blk->start_line + (end_iid / elems_per_p); if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p)) end_line--; shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line; shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_NOTICE(p_hwfn, "QED_TIME_OUT on ptt acquire - dynamic allocation"); return -EBUSY; } for (i = shadow_start_line; i < shadow_end_line; i++) { if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr) 
continue; dma_free_coherent(&p_hwfn->cdev->pdev->dev, p_hwfn->p_cxt_mngr->ilt_shadow[i].size, p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr, p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr); p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL; p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0; p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0; /* compute absolute offset */ reg_offset = PSWRQ2_REG_ILT_MEMORY + ((start_line++) * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS); /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a * wide-bus. */ qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t) &ilt_hw_entry, reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), NULL); } qed_ptt_release(p_hwfn, p_ptt); return 0; } int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) { int rc; u32 cid; /* Free Connection CXT */ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT, qed_cxt_get_proto_cid_start(p_hwfn, proto), qed_cxt_get_proto_cid_count(p_hwfn, proto, &cid)); if (rc) return rc; /* Free Task CXT ( Intentionally RoCE as task-id is shared between * RoCE and iWARP ) */ proto = PROTOCOLID_ROCE; rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, qed_cxt_get_proto_tid_count(p_hwfn, proto)); if (rc) return rc; /* Free TSDM CXT */ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0, p_hwfn->p_cxt_mngr->xrc_srq_count); rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, p_hwfn->p_cxt_mngr->xrc_srq_count, p_hwfn->p_cxt_mngr->srq_count); return rc; } int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, u32 tid, u8 ctx_type, void **pp_task_ctx) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *p_cli; struct qed_tid_seg *p_seg_info; struct qed_ilt_cli_blk *p_seg; u32 num_tids_per_block; u32 tid_size, ilt_idx; u32 total_lines; u32 proto, seg; /* Verify the personality */ switch (p_hwfn->hw_info.personality) { case QED_PCI_FCOE: proto = PROTOCOLID_FCOE; seg = QED_CXT_FCOE_TID_SEG; break; case QED_PCI_ISCSI: case QED_PCI_NVMETCP: proto = PROTOCOLID_TCP_ULP; seg = QED_CXT_TCP_ULP_TID_SEG; break; default: return -EINVAL; } p_cli = &p_mngr->clients[ILT_CLI_CDUT]; if (!p_cli->active) return -EINVAL; p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; if (ctx_type == QED_CTX_WORKING_MEM) { p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)]; } else if (ctx_type == QED_CTX_FL_MEM) { if (!p_seg_info->has_fl_mem) return -EINVAL; p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; } else { return -EINVAL; } total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page); tid_size = p_mngr->task_type_size[p_seg_info->type]; num_tids_per_block = p_seg->real_size_in_page / tid_size; if (total_lines < tid / num_tids_per_block) return -EINVAL; ilt_idx = tid / num_tids_per_block + p_seg->start_line - p_mngr->pf_start_line; *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr + (tid % num_tids_per_block) * tid_size; return 0; } static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk) { if (p_blk->real_size_in_page == 0) return 0; return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); } u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; u16 i, pages = 0; p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]; pages += qed_blk_calculate_pages(p_blk); } return pages; } u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; u16 i, pages = 0; 
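/* Editor's note - illustrative comment, not from the original file: as
 * with the PF helpers above, the count built below is simply
 * qed_blk_calculate_pages(), i.e. DIV_ROUND_UP(total_size,
 * real_size_in_page), summed over the VF forced-load ("init") CDUT
 * blocks - the number of ILT pages that memory spans.
 */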
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) { p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)]; pages += qed_blk_calculate_pages(p_blk); } return pages; } u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; u16 i, pages = 0; p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)]; pages += qed_blk_calculate_pages(p_blk); } return pages; } u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn) { struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; u16 pages = 0, i; p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) { p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)]; pages += qed_blk_calculate_pages(p_blk); } return pages; }
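Editor's sketch, not part of the qed_cxt.c source above: qed_cxt_src_t2_alloc() stitches the searcher T2 pages into a single free list by writing into each src_ent the bus address of the next entry, with the tail of each page pointing at the head of the following page and the very last entry left at zero. Below is a minimal stand-alone illustration of that chaining, using malloc'd arrays and 1-based indices in place of DMA pages and bus addresses (demo_ent, PAGES and ENT_PER_PAGE are made-up names for the sketch):

#include <stdio.h>
#include <stdlib.h>

struct demo_ent { long next; };	/* stands in for struct src_ent */

int main(void)
{
	enum { PAGES = 3, ENT_PER_PAGE = 4 };
	struct demo_ent *page[PAGES];
	int i, j;

	for (i = 0; i < PAGES; i++)
		page[i] = calloc(ENT_PER_PAGE, sizeof(**page));

	/* Chain the entries of a page together, hook each page tail to the
	 * head of the next page, and leave the final entry at 0, mirroring
	 * the "val = 0" branch in qed_cxt_src_t2_alloc().
	 */
	for (i = 0; i < PAGES; i++) {
		for (j = 0; j < ENT_PER_PAGE; j++) {
			if (j + 1 < ENT_PER_PAGE)
				page[i][j].next = i * ENT_PER_PAGE + j + 2;
			else if (i + 1 < PAGES)
				page[i][j].next = (i + 1) * ENT_PER_PAGE + 1;
			else
				page[i][j].next = 0;
		}
	}

	for (i = 0; i < PAGES; i++)
		for (j = 0; j < ENT_PER_PAGE; j++)
			printf("entry %2d -> %ld\n",
			       i * ENT_PER_PAGE + j + 1, page[i][j].next);

	for (i = 0; i < PAGES; i++)
		free(page[i]);
	return 0;
}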
linux-master
drivers/net/ethernet/qlogic/qed/qed_cxt.c
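Editor's sketch, not taken from the repository above: several routines in qed_cxt.c (qed_cxt_get_cid_info(), qed_cxt_dynamic_ilt_alloc(), qed_cxt_free_ilt_range()) share the same index arithmetic - an ILT page holds page_bytes / elem_bytes elements, so element iid lives on ILT line start_line + iid / elems_per_page, at byte offset (iid % elems_per_page) * elem_bytes within that page. The stand-alone rendering below uses hypothetical sizes (64 KiB pages, 512-byte connection contexts) and made-up names (ilt_locate, ilt_loc):

#include <stdio.h>

struct ilt_loc {
	unsigned int line;	/* ILT line holding the element            */
	unsigned int offset;	/* byte offset of the element in that page */
};

/* Mirror of the iid -> (line, offset) mapping used by the CDUC/CDUT/TSDM
 * clients; page_bytes and elem_bytes are assumptions for illustration.
 */
static struct ilt_loc ilt_locate(unsigned int start_line, unsigned int iid,
				 unsigned int page_bytes,
				 unsigned int elem_bytes)
{
	unsigned int per_page = page_bytes / elem_bytes;
	struct ilt_loc loc = {
		.line   = start_line + iid / per_page,
		.offset = (iid % per_page) * elem_bytes,
	};

	return loc;
}

int main(void)
{
	/* e.g. 64 KiB ILT pages and 512-byte connection contexts */
	struct ilt_loc loc = ilt_locate(100, 300, 64 * 1024, 512);

	/* 65536 / 512 = 128 contexts per page, so iid 300 sits on line
	 * 100 + 300 / 128 = 102, at offset (300 % 128) * 512 = 22528.
	 */
	printf("line %u, offset %u\n", loc.line, loc.offset);
	return 0;
}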
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/etherdevice.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_mfw_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sriov.h" #define GRCBASE_MCP 0xe00000 #define QED_MCP_RESP_ITER_US 10 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \ _val) #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset))) #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ offsetof(struct public_drv_mb, _field), _val) #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ offsetof(struct public_drv_mb, _field)) #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ DRV_ID_PDA_COMP_VER_SHIFT) #define MCP_BYTES_PER_MBIT_SHIFT 17 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) { if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) return false; return true; } void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_PORT); u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr); p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, MFW_PORT(p_hwfn)); DP_VERBOSE(p_hwfn, QED_MSG_SP, "port_addr = 0x%x, port_id 0x%02x\n", p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); } void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); u32 tmp, i; if (!p_hwfn->mcp_info->public_base) return; for (i = 0; i < length; i++) { tmp = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->mfw_mb_addr + (i << 2) + sizeof(u32)); /* The MB data is actually BE; Need to force it to cpu */ ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = be32_to_cpu((__force __be32)tmp); } } struct qed_mcp_cmd_elem { struct list_head list; struct qed_mcp_mb_params *p_mb_params; u16 expected_seq_num; bool b_is_completed; }; /* Must be called while cmd_lock is acquired */ static struct qed_mcp_cmd_elem * qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn, struct qed_mcp_mb_params *p_mb_params, u16 expected_seq_num) { struct qed_mcp_cmd_elem *p_cmd_elem = NULL; p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC); if (!p_cmd_elem) goto out; p_cmd_elem->p_mb_params = p_mb_params; p_cmd_elem->expected_seq_num = expected_seq_num; list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list); out: return p_cmd_elem; } /* Must be called while cmd_lock is acquired */ static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn, struct qed_mcp_cmd_elem *p_cmd_elem) { list_del(&p_cmd_elem->list); kfree(p_cmd_elem); } /* Must be called while cmd_lock is acquired */ static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn, u16 seq_num) { struct qed_mcp_cmd_elem *p_cmd_elem = NULL; list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) { if 
(p_cmd_elem->expected_seq_num == seq_num) return p_cmd_elem; } return NULL; } int qed_mcp_free(struct qed_hwfn *p_hwfn) { if (p_hwfn->mcp_info) { struct qed_mcp_cmd_elem *p_cmd_elem = NULL, *p_tmp; kfree(p_hwfn->mcp_info->mfw_mb_cur); kfree(p_hwfn->mcp_info->mfw_mb_shadow); spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); list_for_each_entry_safe(p_cmd_elem, p_tmp, &p_hwfn->mcp_info->cmd_list, list) { qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); } spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); } kfree(p_hwfn->mcp_info); p_hwfn->mcp_info = NULL; return 0; } /* Maximum of 1 sec to wait for the SHMEM ready indication */ #define QED_MCP_SHMEM_RDY_MAX_RETRIES 20 #define QED_MCP_SHMEM_RDY_ITER_MS 50 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info = p_hwfn->mcp_info; u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES; u8 msec = QED_MCP_SHMEM_RDY_ITER_MS; u32 drv_mb_offsize, mfw_mb_offsize; u32 mcp_pf_id = MCP_PF_ID(p_hwfn); p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); if (!p_info->public_base) { DP_NOTICE(p_hwfn, "The address of the MCP scratch-pad is not configured\n"); return -EINVAL; } p_info->public_base |= GRCBASE_MCP; /* Get the MFW MB address and number of supported messages */ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, SECTION_OFFSIZE_ADDR(p_info->public_base, PUBLIC_MFW_MB)); p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr + offsetof(struct public_mfw_mb, sup_msgs)); /* The driver can notify that there was an MCP reset, and might read the * SHMEM values before the MFW has completed initializing them. * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a * data ready indication. */ while (!p_info->mfw_mb_length && --cnt) { msleep(msec); p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr + offsetof(struct public_mfw_mb, sup_msgs)); } if (!cnt) { DP_NOTICE(p_hwfn, "Failed to get the SHMEM ready notification after %d msec\n", QED_MCP_SHMEM_RDY_MAX_RETRIES * msec); return -EBUSY; } /* Calculate the driver and MFW mailbox address */ drv_mb_offsize = qed_rd(p_hwfn, p_ptt, SECTION_OFFSIZE_ADDR(p_info->public_base, PUBLIC_DRV_MB)); p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id); DP_VERBOSE(p_hwfn, QED_MSG_SP, "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); /* Get the current driver mailbox sequence before sending * the first command */ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK; /* Get current FW pulse sequence */ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & DRV_PULSE_SEQ_MASK; p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); return 0; } int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info; u32 size; /* Allocate mcp_info structure */ p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL); if (!p_hwfn->mcp_info) goto err; p_info = p_hwfn->mcp_info; /* Initialize the MFW spinlock */ spin_lock_init(&p_info->cmd_lock); spin_lock_init(&p_info->link_lock); spin_lock_init(&p_info->unload_lock); INIT_LIST_HEAD(&p_info->cmd_list); if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) { DP_NOTICE(p_hwfn, "MCP is not initialized\n"); /* Do not free mcp_info here, since public_base indicate that * the MCP is not initialized */ return 0; } size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); 
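/* Editor's note - illustrative comment, not part of the original driver:
 * two equally sized buffers are allocated below. "mfw_mb_cur" holds the
 * most recent copy read back from the MFW mailbox, while "mfw_mb_shadow"
 * keeps the previous copy, so newly asserted MFW message bits can be
 * detected by comparing the two.
 */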
p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return -EBUSY;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000*10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
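	 * qed_mcp_cmd_add_elem() prepends new elements with list_add(), so the pending command, when present, is always the first entry.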
*/ if (!list_empty(&p_hwfn->mcp_info->cmd_list)) { p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list, struct qed_mcp_cmd_elem, list); return !p_cmd_elem->b_is_completed; } return false; } /* Must be called while cmd_lock is acquired */ static int qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_mb_params *p_mb_params; struct qed_mcp_cmd_elem *p_cmd_elem; u32 mcp_resp; u16 seq_num; mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK); /* Return if no new non-handled response has been received */ if (seq_num != p_hwfn->mcp_info->drv_mb_seq) return -EAGAIN; p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num); if (!p_cmd_elem) { DP_ERR(p_hwfn, "Failed to find a pending mailbox cmd that expects sequence number %d\n", seq_num); return -EINVAL; } p_mb_params = p_cmd_elem->p_mb_params; /* Get the MFW response along with the sequence number */ p_mb_params->mcp_resp = mcp_resp; /* Get the MFW param */ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); /* Get the union data */ if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) { u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, union_data_addr, p_mb_params->data_dst_size); } p_cmd_elem->b_is_completed = true; return 0; } /* Must be called while cmd_lock is acquired */ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_mb_params *p_mb_params, u16 seq_num) { union drv_union_data union_data; u32 union_data_addr; /* Set the union data */ union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); memset(&union_data, 0, sizeof(union_data)); if (p_mb_params->p_data_src && p_mb_params->data_src_size) memcpy(&union_data, p_mb_params->p_data_src, p_mb_params->data_src_size); qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, sizeof(union_data)); /* Set the drv param */ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param); /* Set the drv command along with the sequence number */ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num)); DP_VERBOSE(p_hwfn, QED_MSG_SP, "MFW mailbox: command 0x%08x param 0x%08x\n", (p_mb_params->cmd | seq_num), p_mb_params->param); } static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd) { p_hwfn->mcp_info->b_block_cmd = block_cmd; DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", block_cmd ? 
"Block" : "Unblock"); } static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; u32 delay = QED_MCP_RESP_ITER_US; cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); udelay(delay); cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); udelay(delay); cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); DP_NOTICE(p_hwfn, "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); } static int _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_mb_params *p_mb_params, u32 max_retries, u32 usecs) { u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); struct qed_mcp_cmd_elem *p_cmd_elem; u16 seq_num; int rc = 0; /* Wait until the mailbox is non-occupied */ do { /* Exit the loop if there is no pending command, or if the * pending command is completed during this iteration. * The spinlock stays locked until the command is sent. */ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); if (!qed_mcp_has_pending_cmd(p_hwfn)) break; rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); if (!rc) break; else if (rc != -EAGAIN) goto err; spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) msleep(msecs); else udelay(usecs); } while (++cnt < max_retries); if (cnt >= max_retries) { DP_NOTICE(p_hwfn, "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); return -EAGAIN; } /* Send the mailbox command */ qed_mcp_reread_offsets(p_hwfn, p_ptt); seq_num = ++p_hwfn->mcp_info->drv_mb_seq; p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num); if (!p_cmd_elem) { rc = -ENOMEM; goto err; } __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num); spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); /* Wait for the MFW response */ do { /* Exit the loop if the command is already completed, or if the * command is completed during this iteration. * The spinlock stays locked until the list element is removed. 
*/ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) msleep(msecs); else udelay(usecs); spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); if (p_cmd_elem->b_is_completed) break; rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); if (!rc) break; else if (rc != -EAGAIN) goto err; spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); } while (++cnt < max_retries); if (cnt >= max_retries) { DP_NOTICE(p_hwfn, "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); qed_mcp_print_cpu_info(p_hwfn, p_ptt); spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) qed_mcp_cmd_set_blocking(p_hwfn, true); qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_MFW_RESP_FAIL, NULL); return -EAGAIN; } qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); DP_VERBOSE(p_hwfn, QED_MSG_SP, "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", p_mb_params->mcp_resp, p_mb_params->mcp_param, (cnt * usecs) / 1000, (cnt * usecs) % 1000); /* Clear the sequence number from the MFW response */ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; return 0; err: spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); return rc; } static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_mb_params *p_mb_params) { size_t union_data_size = sizeof(union drv_union_data); u32 max_retries = QED_DRV_MB_MAX_RETRIES; u32 usecs = QED_MCP_RESP_ITER_US; /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; } if (p_hwfn->mcp_info->b_block_cmd) { DP_NOTICE(p_hwfn, "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); return -EBUSY; } if (p_mb_params->data_src_size > union_data_size || p_mb_params->data_dst_size > union_data_size) { DP_ERR(p_hwfn, "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n", p_mb_params->data_src_size, p_mb_params->data_dst_size, union_data_size); return -EINVAL; } if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { max_retries = DIV_ROUND_UP(max_retries, 1000); usecs *= 1000; } return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, usecs); } static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param, bool can_sleep) { struct qed_mcp_mb_params mb_params; int rc; memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.param = param; mb_params.flags = can_sleep ? 
QED_MB_FLAG_CAN_SLEEP : 0; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; *o_mcp_resp = mb_params.mcp_resp; *o_mcp_param = mb_params.mcp_param; return 0; } int qed_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param) { return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp, o_mcp_param, true)); } int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param) { return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp, o_mcp_param, false)); } static int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf) { struct qed_mcp_mb_params mb_params; int rc; memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.param = param; mb_params.p_data_src = i_buf; mb_params.data_src_size = (u8)i_txn_size; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; *o_mcp_resp = mb_params.mcp_resp; *o_mcp_param = mb_params.mcp_param; /* nvm_info needs to be updated */ p_hwfn->nvm_info.valid = false; return 0; } int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf, bool b_can_sleep) { struct qed_mcp_mb_params mb_params; u8 raw_data[MCP_DRV_NVM_BUF_LEN]; int rc; memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.param = param; mb_params.p_data_dst = raw_data; /* Use the maximal value since the actual one is part of the response */ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; if (b_can_sleep) mb_params.flags = QED_MB_FLAG_CAN_SLEEP; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; *o_mcp_resp = mb_params.mcp_resp; *o_mcp_param = mb_params.mcp_param; *o_txn_size = *o_mcp_param; memcpy(o_buf, raw_data, *o_txn_size); return 0; } static bool qed_mcp_can_force_load(u8 drv_role, u8 exist_drv_role, enum qed_override_force_load override_force_load) { bool can_force_load = false; switch (override_force_load) { case QED_OVERRIDE_FORCE_LOAD_ALWAYS: can_force_load = true; break; case QED_OVERRIDE_FORCE_LOAD_NEVER: can_force_load = false; break; default: can_force_load = (drv_role == DRV_ROLE_OS && exist_drv_role == DRV_ROLE_PREBOOT) || (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS); break; } return can_force_load; } static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resp = 0, param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0, &resp, &param); if (rc) DP_NOTICE(p_hwfn, "Failed to send cancel load request, rc = %d\n", rc); return rc; } #define BITMAP_IDX_FOR_CONFIG_QEDE BIT(0) #define BITMAP_IDX_FOR_CONFIG_QED_SRIOV BIT(1) #define BITMAP_IDX_FOR_CONFIG_QEDR BIT(2) #define BITMAP_IDX_FOR_CONFIG_QEDF BIT(4) #define BITMAP_IDX_FOR_CONFIG_QEDI BIT(5) #define BITMAP_IDX_FOR_CONFIG_QED_LL2 BIT(6) static u32 qed_get_config_bitmap(void) { u32 config_bitmap = 0x0; if (IS_ENABLED(CONFIG_QEDE)) config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDE; if (IS_ENABLED(CONFIG_QED_SRIOV)) config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_SRIOV; if (IS_ENABLED(CONFIG_QED_RDMA)) config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDR; if (IS_ENABLED(CONFIG_QED_FCOE)) config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDF; if (IS_ENABLED(CONFIG_QED_ISCSI)) config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDI; if (IS_ENABLED(CONFIG_QED_LL2)) config_bitmap |= 
BITMAP_IDX_FOR_CONFIG_QED_LL2; return config_bitmap; } struct qed_load_req_in_params { u8 hsi_ver; #define QED_LOAD_REQ_HSI_VER_DEFAULT 0 #define QED_LOAD_REQ_HSI_VER_1 1 u32 drv_ver_0; u32 drv_ver_1; u32 fw_ver; u8 drv_role; u8 timeout_val; u8 force_cmd; bool avoid_eng_reset; }; struct qed_load_req_out_params { u32 load_code; u32 exist_drv_ver_0; u32 exist_drv_ver_1; u32 exist_fw_ver; u8 exist_drv_role; u8 mfw_hsi_ver; bool drv_exists; }; static int __qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_load_req_in_params *p_in_params, struct qed_load_req_out_params *p_out_params) { struct qed_mcp_mb_params mb_params; struct load_req_stc load_req; struct load_rsp_stc load_rsp; u32 hsi_ver; int rc; memset(&load_req, 0, sizeof(load_req)); load_req.drv_ver_0 = p_in_params->drv_ver_0; load_req.drv_ver_1 = p_in_params->drv_ver_1; load_req.fw_ver = p_in_params->fw_ver; QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role); QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO, p_in_params->timeout_val); QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd); QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0, p_in_params->avoid_eng_reset); hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ? DRV_ID_MCP_HSI_VER_CURRENT : (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT); memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type; mb_params.p_data_src = &load_req; mb_params.data_src_size = sizeof(load_req); mb_params.p_data_dst = &load_rsp; mb_params.data_dst_size = sizeof(load_rsp); mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", mb_params.param, QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW), QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE), QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER), QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER)); if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) { DP_VERBOSE(p_hwfn, QED_MSG_SP, "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n", load_req.drv_ver_0, load_req.drv_ver_1, load_req.fw_ver, load_req.misc0, QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE), QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO), QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE), QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0)); } rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) { DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc); return rc; } DP_VERBOSE(p_hwfn, QED_MSG_SP, "Load Response: resp 0x%08x\n", mb_params.mcp_resp); p_out_params->load_code = mb_params.mcp_resp; if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 && p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { DP_VERBOSE(p_hwfn, QED_MSG_SP, "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n", load_rsp.drv_ver_0, load_rsp.drv_ver_1, load_rsp.fw_ver, load_rsp.misc0, QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE), QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI), QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0)); p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0; p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1; p_out_params->exist_fw_ver = load_rsp.fw_ver; p_out_params->exist_drv_role = QED_MFW_GET_FIELD(load_rsp.misc0, 
LOAD_RSP_ROLE); p_out_params->mfw_hsi_ver = QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI); p_out_params->drv_exists = QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) & LOAD_RSP_FLAGS0_DRV_EXISTS; } return 0; } static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn, enum qed_drv_role drv_role, u8 *p_mfw_drv_role) { switch (drv_role) { case QED_DRV_ROLE_OS: *p_mfw_drv_role = DRV_ROLE_OS; break; case QED_DRV_ROLE_KDUMP: *p_mfw_drv_role = DRV_ROLE_KDUMP; break; default: DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role); return -EINVAL; } return 0; } enum qed_load_req_force { QED_LOAD_REQ_FORCE_NONE, QED_LOAD_REQ_FORCE_PF, QED_LOAD_REQ_FORCE_ALL, }; static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn, enum qed_load_req_force force_cmd, u8 *p_mfw_force_cmd) { switch (force_cmd) { case QED_LOAD_REQ_FORCE_NONE: *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE; break; case QED_LOAD_REQ_FORCE_PF: *p_mfw_force_cmd = LOAD_REQ_FORCE_PF; break; case QED_LOAD_REQ_FORCE_ALL: *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL; break; } } int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_load_req_params *p_params) { struct qed_load_req_out_params out_params; struct qed_load_req_in_params in_params; u8 mfw_drv_role, mfw_force_cmd; int rc; memset(&in_params, 0, sizeof(in_params)); in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT; in_params.drv_ver_1 = qed_get_config_bitmap(); in_params.fw_ver = STORM_FW_VERSION; rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role); if (rc) return rc; in_params.drv_role = mfw_drv_role; in_params.timeout_val = p_params->timeout_val; qed_get_mfw_force_cmd(p_hwfn, QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd); in_params.force_cmd = mfw_force_cmd; in_params.avoid_eng_reset = p_params->avoid_eng_reset; memset(&out_params, 0, sizeof(out_params)); rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); if (rc) return rc; /* First handle cases where another load request should/might be sent: * - MFW expects the old interface [HSI version = 1] * - MFW responds that a force load request is required */ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { DP_INFO(p_hwfn, "MFW refused a load request due to HSI > 1. 
Resending with HSI = 1\n"); in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1; memset(&out_params, 0, sizeof(out_params)); rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); if (rc) return rc; } else if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) { if (qed_mcp_can_force_load(in_params.drv_role, out_params.exist_drv_role, p_params->override_force_load)) { DP_INFO(p_hwfn, "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n", in_params.drv_role, in_params.fw_ver, in_params.drv_ver_0, in_params.drv_ver_1, out_params.exist_drv_role, out_params.exist_fw_ver, out_params.exist_drv_ver_0, out_params.exist_drv_ver_1); qed_get_mfw_force_cmd(p_hwfn, QED_LOAD_REQ_FORCE_ALL, &mfw_force_cmd); in_params.force_cmd = mfw_force_cmd; memset(&out_params, 0, sizeof(out_params)); rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); if (rc) return rc; } else { DP_NOTICE(p_hwfn, "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n", in_params.drv_role, in_params.fw_ver, in_params.drv_ver_0, in_params.drv_ver_1, out_params.exist_drv_role, out_params.exist_fw_ver, out_params.exist_drv_ver_0, out_params.exist_drv_ver_1); DP_NOTICE(p_hwfn, "Avoid sending a force load request to prevent disruption of active PFs\n"); qed_mcp_cancel_load_req(p_hwfn, p_ptt); return -EBUSY; } } /* Now handle the other types of responses. * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not * expected here after the additional revised load requests were sent. */ switch (out_params.load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: case FW_MSG_CODE_DRV_LOAD_PORT: case FW_MSG_CODE_DRV_LOAD_FUNCTION: if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 && out_params.drv_exists) { /* The role and fw/driver version match, but the PF is * already loaded and has not been unloaded gracefully. */ DP_NOTICE(p_hwfn, "PF is already loaded\n"); return -EINVAL; } break; default: DP_NOTICE(p_hwfn, "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n", out_params.load_code); return -EBUSY; } p_params->load_code = out_params.load_code; return 0; } int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resp = 0, param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp, &param); if (rc) { DP_NOTICE(p_hwfn, "Failed to send a LOAD_DONE command, rc = %d\n", rc); return rc; } /* Check if there is a DID mismatch between nvm-cfg/efuse */ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) DP_NOTICE(p_hwfn, "warning: device configuration is not supported on this board type. 
The device may not function as expected.\n"); return 0; } #define MFW_COMPLETION_MAX_ITER 5000 #define MFW_COMPLETION_INTERVAL_MS 1 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_mb_params mb_params; u32 cnt = MFW_COMPLETION_MAX_ITER; u32 wol_param; int rc; switch (p_hwfn->cdev->wol_config) { case QED_OV_WOL_DISABLED: wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED; break; case QED_OV_WOL_ENABLED: wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED; break; default: DP_NOTICE(p_hwfn, "Unknown WoL configuration %02x\n", p_hwfn->cdev->wol_config); fallthrough; case QED_OV_WOL_DEFAULT: wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; } memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; mb_params.param = wol_param; mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; spin_lock_bh(&p_hwfn->mcp_info->unload_lock); set_bit(QED_MCP_BYPASS_PROC_BIT, &p_hwfn->mcp_info->mcp_handling_status); spin_unlock_bh(&p_hwfn->mcp_info->unload_lock); rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); while (test_bit(QED_MCP_IN_PROCESSING_BIT, &p_hwfn->mcp_info->mcp_handling_status) && --cnt) msleep(MFW_COMPLETION_INTERVAL_MS); if (!cnt) DP_NOTICE(p_hwfn, "Failed to wait MFW event completion after %d msec\n", MFW_COMPLETION_MAX_ITER * MFW_COMPLETION_INTERVAL_MS); return rc; } int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_mb_params mb_params; struct mcp_mac wol_mac; memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE; /* Set the primary MAC if WoL is enabled */ if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) { u8 *p_mac = p_hwfn->cdev->wol_mac; memset(&wol_mac, 0, sizeof(wol_mac)); wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1]; wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 | p_mac[4] << 8 | p_mac[5]; DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFDOWN), "Setting WoL MAC: %pM --> [%08x,%08x]\n", p_mac, wol_mac.mac_upper, wol_mac.mac_lower); mb_params.p_data_src = &wol_mac; mb_params.data_src_size = sizeof(wol_mac); } return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_PATH); u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); u32 path_addr = SECTION_ADDR(mfw_path_offsize, QED_PATH_ID(p_hwfn)); u32 disabled_vfs[VF_MAX_STATIC / 32]; int i; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Reading Disabled VF information from [offset %08x], path_addr %08x\n", mfw_path_offsize, path_addr); for (i = 0; i < (VF_MAX_STATIC / 32); i++) { disabled_vfs[i] = qed_rd(p_hwfn, p_ptt, path_addr + offsetof(struct public_path, mcp_vf_disabled) + sizeof(u32) * i); DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), "FLR-ed VFs [%08x,...,%08x] - %08x\n", i * 32, (i + 1) * 32 - 1, disabled_vfs[i]); } if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs)) qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG); } int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *vfs_to_ack) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_FUNC); u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr); u32 func_addr = SECTION_ADDR(mfw_func_offsize, MCP_PF_ID(p_hwfn)); struct qed_mcp_mb_params mb_params; int rc; int i; for (i = 0; i < (VF_MAX_STATIC / 32); i++) DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), "Acking VFs [%08x,...,%08x] - %08x\n", i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]); memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = 
DRV_MSG_CODE_VF_DISABLED_DONE; mb_params.p_data_src = vfs_to_ack; mb_params.data_src_size = VF_MAX_STATIC / 8; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) { DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n"); return -EBUSY; } /* Clear the ACK bits */ for (i = 0; i < (VF_MAX_STATIC / 32); i++) qed_wr(p_hwfn, p_ptt, func_addr + offsetof(struct public_func, drv_ack_vf_disabled) + i * sizeof(u32), 0); return rc; } static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 transceiver_state; transceiver_state = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, transceiver_data)); DP_VERBOSE(p_hwfn, (NETIF_MSG_HW | QED_MSG_SP), "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", transceiver_state, (u32)(p_hwfn->mcp_info->port_addr + offsetof(struct public_port, transceiver_data))); transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE); if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) DP_NOTICE(p_hwfn, "Transceiver is present.\n"); else DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); } static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_link_state *p_link) { u32 eee_status, val; p_link->eee_adv_caps = 0; p_link->eee_lp_adv_caps = 0; eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, eee_status)); p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT); val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET; if (val & EEE_1G_ADV) p_link->eee_adv_caps |= QED_EEE_1G_ADV; if (val & EEE_10G_ADV) p_link->eee_adv_caps |= QED_EEE_10G_ADV; val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET; if (val & EEE_1G_ADV) p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV; if (val & EEE_10G_ADV) p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV; } static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct public_func *p_data, int pfid) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_FUNC); u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); u32 func_addr; u32 i, size; func_addr = SECTION_ADDR(mfw_path_offsize, pfid); memset(p_data, 0, sizeof(*p_data)); size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); for (i = 0; i < size / sizeof(u32); i++) ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, func_addr + (i << 2)); return size; } static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, struct public_func *p_shmem_info) { struct qed_mcp_function_info *p_info; p_info = &p_hwfn->mcp_info->func_info; p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config, FUNC_MF_CFG_MIN_BW); if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { DP_INFO(p_hwfn, "bandwidth minimum out of bounds [%02x]. Set to 1\n", p_info->bandwidth_min); p_info->bandwidth_min = 1; } p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config, FUNC_MF_CFG_MAX_BW); if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { DP_INFO(p_hwfn, "bandwidth maximum out of bounds [%02x]. 
Set to 100\n", p_info->bandwidth_max); p_info->bandwidth_max = 100; } } static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_reset) { struct qed_mcp_link_state *p_link; u8 max_bw, min_bw; u32 status = 0; /* Prevent SW/attentions from doing this at the same time */ spin_lock_bh(&p_hwfn->mcp_info->link_lock); p_link = &p_hwfn->mcp_info->link_output; memset(p_link, 0, sizeof(*p_link)); if (!b_reset) { status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, link_status)); DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP), "Received link update [0x%08x] from mfw [Addr 0x%x]\n", status, (u32)(p_hwfn->mcp_info->port_addr + offsetof(struct public_port, link_status))); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link indications\n"); goto out; } if (p_hwfn->b_drv_link_init) { /* Link indication with modern MFW arrives as per-PF * indication. */ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { struct public_func shmem_info; qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); p_link->link_up = !!(shmem_info.status & FUNC_STATUS_VIRTUAL_LINK_UP); qed_read_pf_bandwidth(p_hwfn, &shmem_info); DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Virtual link_up = %d\n", p_link->link_up); } else { p_link->link_up = !!(status & LINK_STATUS_LINK_UP); DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Physical link_up = %d\n", p_link->link_up); } } else { p_link->link_up = false; } p_link->full_duplex = true; switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { case LINK_STATUS_SPEED_AND_DUPLEX_100G: p_link->speed = 100000; break; case LINK_STATUS_SPEED_AND_DUPLEX_50G: p_link->speed = 50000; break; case LINK_STATUS_SPEED_AND_DUPLEX_40G: p_link->speed = 40000; break; case LINK_STATUS_SPEED_AND_DUPLEX_25G: p_link->speed = 25000; break; case LINK_STATUS_SPEED_AND_DUPLEX_20G: p_link->speed = 20000; break; case LINK_STATUS_SPEED_AND_DUPLEX_10G: p_link->speed = 10000; break; case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: p_link->full_duplex = false; fallthrough; case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: p_link->speed = 1000; break; default: p_link->speed = 0; p_link->link_up = 0; } if (p_link->link_up && p_link->speed) p_link->line_speed = p_link->speed; else p_link->line_speed = 0; max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; min_bw = p_hwfn->mcp_info->func_info.bandwidth_min; /* Max bandwidth configuration */ __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); /* Min bandwidth configuration */ __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt, p_link->min_pf_rate); p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); p_link->parallel_detection = !!(status & LINK_STATUS_PARALLEL_DETECTION_USED); p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? QED_LINK_PARTNER_SPEED_1G_FD : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? QED_LINK_PARTNER_SPEED_1G_HD : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? QED_LINK_PARTNER_SPEED_10G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? QED_LINK_PARTNER_SPEED_20G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? 
QED_LINK_PARTNER_SPEED_25G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? QED_LINK_PARTNER_SPEED_40G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? QED_LINK_PARTNER_SPEED_50G : 0; p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? QED_LINK_PARTNER_SPEED_100G : 0; p_link->partner_tx_flow_ctrl_en = !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); p_link->partner_rx_flow_ctrl_en = !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE; break; case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE; break; case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE; break; default: p_link->partner_adv_pause = 0; } p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { switch (status & LINK_STATUS_FEC_MODE_MASK) { case LINK_STATUS_FEC_MODE_NONE: p_link->fec_active = QED_FEC_MODE_NONE; break; case LINK_STATUS_FEC_MODE_FIRECODE_CL74: p_link->fec_active = QED_FEC_MODE_FIRECODE; break; case LINK_STATUS_FEC_MODE_RS_CL91: p_link->fec_active = QED_FEC_MODE_RS; break; default: p_link->fec_active = QED_FEC_MODE_AUTO; } } else { p_link->fec_active = QED_FEC_MODE_UNSUPPORTED; } qed_link_update(p_hwfn, p_ptt); out: spin_unlock_bh(&p_hwfn->mcp_info->link_lock); } int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) { struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; struct qed_mcp_mb_params mb_params; struct eth_phy_cfg phy_cfg; u32 cmd, fec_bit = 0; u32 val, ext_speed; int rc = 0; /* Set the shmem configuration according to params */ memset(&phy_cfg, 0, sizeof(phy_cfg)); cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; if (!params->speed.autoneg) phy_cfg.speed = params->speed.forced_speed; phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; phy_cfg.adv_speed = params->speed.advertised_speeds; phy_cfg.loopback_mode = params->loopback_mode; /* There are MFWs that share this capability regardless of whether * this is feasible or not. And given that at the very least adv_caps * would be set internally by qed, we want to make sure LFA would * still work. 
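* Hence the EEE fields below are written only when both the FW_MB_PARAM_FEATURE_SUPPORT_EEE capability bit and the caller's eee.enable flag are set.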
*/ if ((p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) { phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; if (params->eee.tx_lpi_enable) phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; if (params->eee.adv_caps & QED_EEE_1G_ADV) phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G; if (params->eee.adv_caps & QED_EEE_10G_ADV) phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G; phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer << EEE_TX_TIMER_USEC_OFFSET) & EEE_TX_TIMER_USEC_MASK; } if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { if (params->fec & QED_FEC_MODE_NONE) fec_bit |= FEC_FORCE_MODE_NONE; else if (params->fec & QED_FEC_MODE_FIRECODE) fec_bit |= FEC_FORCE_MODE_FIRECODE; else if (params->fec & QED_FEC_MODE_RS) fec_bit |= FEC_FORCE_MODE_RS; else if (params->fec & QED_FEC_MODE_AUTO) fec_bit |= FEC_FORCE_MODE_AUTO; SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit); } if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { ext_speed = 0; if (params->ext_speed.autoneg) ext_speed |= ETH_EXT_SPEED_NONE; val = params->ext_speed.forced_speed; if (val & QED_EXT_SPEED_1G) ext_speed |= ETH_EXT_SPEED_1G; if (val & QED_EXT_SPEED_10G) ext_speed |= ETH_EXT_SPEED_10G; if (val & QED_EXT_SPEED_25G) ext_speed |= ETH_EXT_SPEED_25G; if (val & QED_EXT_SPEED_40G) ext_speed |= ETH_EXT_SPEED_40G; if (val & QED_EXT_SPEED_50G_R) ext_speed |= ETH_EXT_SPEED_50G_BASE_R; if (val & QED_EXT_SPEED_50G_R2) ext_speed |= ETH_EXT_SPEED_50G_BASE_R2; if (val & QED_EXT_SPEED_100G_R2) ext_speed |= ETH_EXT_SPEED_100G_BASE_R2; if (val & QED_EXT_SPEED_100G_R4) ext_speed |= ETH_EXT_SPEED_100G_BASE_R4; if (val & QED_EXT_SPEED_100G_P4) ext_speed |= ETH_EXT_SPEED_100G_BASE_P4; SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED, ext_speed); ext_speed = 0; val = params->ext_speed.advertised_speeds; if (val & QED_EXT_SPEED_MASK_1G) ext_speed |= ETH_EXT_ADV_SPEED_1G; if (val & QED_EXT_SPEED_MASK_10G) ext_speed |= ETH_EXT_ADV_SPEED_10G; if (val & QED_EXT_SPEED_MASK_25G) ext_speed |= ETH_EXT_ADV_SPEED_25G; if (val & QED_EXT_SPEED_MASK_40G) ext_speed |= ETH_EXT_ADV_SPEED_40G; if (val & QED_EXT_SPEED_MASK_50G_R) ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R; if (val & QED_EXT_SPEED_MASK_50G_R2) ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2; if (val & QED_EXT_SPEED_MASK_100G_R2) ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2; if (val & QED_EXT_SPEED_MASK_100G_R4) ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4; if (val & QED_EXT_SPEED_MASK_100G_P4) ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4; phy_cfg.extended_speed |= ext_speed; SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE, params->ext_fec_mode); } p_hwfn->b_drv_link_init = b_up; if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n", phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, phy_cfg.loopback_mode, phy_cfg.fec_mode, phy_cfg.extended_speed); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); } memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.p_data_src = &phy_cfg; mb_params.data_src_size = sizeof(phy_cfg); rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); /* if mcp fails to respond we must abort */ if (rc) { DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc; } /* Mimic link-change attention, done for several reasons: * - On reset, there's no guarantee MFW would trigger * an attention. 
* - On initialization, older MFWs might not indicate link change * during LFA, so we'll never get an UP indication. */ qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up); return 0; } u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; if (IS_VF(p_hwfn->cdev)) return -EINVAL; path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_PATH); path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr); path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn)); proc_kill_cnt = qed_rd(p_hwfn, p_ptt, path_addr + offsetof(struct public_path, process_kill)) & PROCESS_KILL_COUNTER_MASK; return proc_kill_cnt; } static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dev *cdev = p_hwfn->cdev; u32 proc_kill_cnt; /* Prevent possible attentions/interrupts during the recovery handling * and till its load phase, during which they will be re-enabled. */ qed_int_igu_disable_int(p_hwfn, p_ptt); DP_NOTICE(p_hwfn, "Received a process kill indication\n"); /* The following operations should be done once, and thus in CMT mode * are carried out by only the first HW function. */ if (p_hwfn != QED_LEADING_HWFN(cdev)) return; if (cdev->recov_in_prog) { DP_NOTICE(p_hwfn, "Ignoring the indication since a recovery process is already in progress\n"); return; } cdev->recov_in_prog = true; proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt); DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt); qed_schedule_recovery_handler(p_hwfn); } static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum MFW_DRV_MSG_TYPE type) { enum qed_mcp_protocol_type stats_type; union qed_mcp_protocol_stats stats; struct qed_mcp_mb_params mb_params; u32 hsi_param; switch (type) { case MFW_DRV_MSG_GET_LAN_STATS: stats_type = QED_MCP_LAN_STATS; hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; break; case MFW_DRV_MSG_GET_FCOE_STATS: stats_type = QED_MCP_FCOE_STATS; hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE; break; case MFW_DRV_MSG_GET_ISCSI_STATS: stats_type = QED_MCP_ISCSI_STATS; hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI; break; case MFW_DRV_MSG_GET_RDMA_STATS: stats_type = QED_MCP_RDMA_STATS; hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA; break; default: DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type); return; } qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats); memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_GET_STATS; mb_params.param = hsi_param; mb_params.p_data_src = &stats; mb_params.data_src_size = sizeof(stats); qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_function_info *p_info; struct public_func shmem_info; u32 resp = 0, param = 0; qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); qed_read_pf_bandwidth(p_hwfn, &shmem_info); p_info = &p_hwfn->mcp_info->func_info; qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min); qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max); /* Acknowledge the MFW */ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, &param); } static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct public_func shmem_info; u32 resp = 0, param = 0; qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & 
FUNC_MF_CFG_OV_STAG_MASK; p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) { if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) { qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan); qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1); /* Configure DB to add external vlan to EDPM packets */ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, p_hwfn->hw_info.ovlan); } else { qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0); qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0); qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0); qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0); } qed_sp_pf_update_stag(p_hwfn); } DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); /* Acknowledge the MFW */ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, &resp, &param); } static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { /* A single notification should be sent to upper driver in CMT mode */ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev)) return; qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL, "Fan failure was detected on the network interface card and it's going to be shut down.\n"); } struct qed_mdump_cmd_params { u32 cmd; void *p_data_src; u8 data_src_size; void *p_data_dst; u8 data_dst_size; u32 mcp_resp; }; static int qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mdump_cmd_params *p_mdump_cmd_params) { struct qed_mcp_mb_params mb_params; int rc; memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD; mb_params.param = p_mdump_cmd_params->cmd; mb_params.p_data_src = p_mdump_cmd_params->p_data_src; mb_params.data_src_size = p_mdump_cmd_params->data_src_size; mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst; mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp; if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) { DP_INFO(p_hwfn, "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n", p_mdump_cmd_params->cmd); rc = -EOPNOTSUPP; } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The mdump command is not supported by the MFW\n"); rc = -EOPNOTSUPP; } return rc; } static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mdump_cmd_params mdump_cmd_params; memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params)); mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK; return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); } int qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct mdump_retain_data_stc *p_mdump_retain) { struct qed_mdump_cmd_params mdump_cmd_params; int rc; memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params)); mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; mdump_cmd_params.p_data_dst = p_mdump_retain; mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain); rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); if (rc) return rc; if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { DP_INFO(p_hwfn, "Failed to get the mdump retained data [mcp_resp 0x%x]\n", mdump_cmd_params.mcp_resp); return -EINVAL; } return 0; } static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct 
mdump_retain_data_stc mdump_retain; int rc; /* In CMT mode - no need for more than a single acknowledgment to the * MFW, and no more than a single notification to the upper driver. */ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev)) return; rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); if (rc == 0 && mdump_retain.valid) DP_NOTICE(p_hwfn, "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", mdump_retain.epoch, mdump_retain.pf, mdump_retain.status); else DP_NOTICE(p_hwfn, "The MFW notified that a critical error occurred in the device\n"); DP_NOTICE(p_hwfn, "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); qed_mcp_mdump_ack(p_hwfn, p_ptt); qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL); } void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct public_func shmem_info; u32 port_cfg, val; if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) return; memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, oem_cfg_port)); val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> OEM_CFG_CHANNEL_TYPE_OFFSET; if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d port_id 0x%02x\n", val, MFW_PORT(p_hwfn)); val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; if (val == OEM_CFG_SCHED_TYPE_ETS) { p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS; } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) { p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; } else { p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d port_id 0x%02x\n", val, MFW_PORT(p_hwfn)); } qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET; p_hwfn->ufp_info.tc = (u8)val; val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) { p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; } else { p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; DP_NOTICE(p_hwfn, "Unknown Host priority control %d port_id 0x%02x\n", val, MFW_PORT(p_hwfn)); } DP_NOTICE(p_hwfn, "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n", p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn)); } static int qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { qed_mcp_read_ufp_config(p_hwfn, p_ptt); if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) { p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; qed_hw_info_set_offload_tc(&p_hwfn->hw_info, p_hwfn->ufp_info.tc); qed_qm_reconf(p_hwfn, p_ptt); } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) { /* Merge UFP TC with the dcbx TC data */ qed_dcbx_mib_update_event(p_hwfn, p_ptt, QED_DCBX_OPERATIONAL_MIB); } else { DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n"); return -EINVAL; } /* update storm FW with negotiation results */ qed_sp_pf_update_ufp(p_hwfn); /* update stag pcp value */ qed_sp_pf_update_stag(p_hwfn); return 0; } int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *info = p_hwfn->mcp_info; int rc = 0; bool found = false; u16 i; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from 
MFW\n"); /* Read Messages from MFW */ qed_mcp_read_mb(p_hwfn, p_ptt); /* Compare current messages to old ones */ for (i = 0; i < info->mfw_mb_length; i++) { if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) continue; found = true; DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); spin_lock_bh(&p_hwfn->mcp_info->unload_lock); if (test_bit(QED_MCP_BYPASS_PROC_BIT, &p_hwfn->mcp_info->mcp_handling_status)) { spin_unlock_bh(&p_hwfn->mcp_info->unload_lock); DP_INFO(p_hwfn, "Msg [%d] is bypassed on unload flow\n", i); continue; } set_bit(QED_MCP_IN_PROCESSING_BIT, &p_hwfn->mcp_info->mcp_handling_status); spin_unlock_bh(&p_hwfn->mcp_info->unload_lock); switch (i) { case MFW_DRV_MSG_LINK_CHANGE: qed_mcp_handle_link_change(p_hwfn, p_ptt, false); break; case MFW_DRV_MSG_VF_DISABLED: qed_mcp_handle_vf_flr(p_hwfn, p_ptt); break; case MFW_DRV_MSG_LLDP_DATA_UPDATED: qed_dcbx_mib_update_event(p_hwfn, p_ptt, QED_DCBX_REMOTE_LLDP_MIB); break; case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: qed_dcbx_mib_update_event(p_hwfn, p_ptt, QED_DCBX_REMOTE_MIB); break; case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: qed_dcbx_mib_update_event(p_hwfn, p_ptt, QED_DCBX_OPERATIONAL_MIB); break; case MFW_DRV_MSG_OEM_CFG_UPDATE: qed_mcp_handle_ufp_event(p_hwfn, p_ptt); break; case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; case MFW_DRV_MSG_ERROR_RECOVERY: qed_mcp_handle_process_kill(p_hwfn, p_ptt); break; case MFW_DRV_MSG_GET_LAN_STATS: case MFW_DRV_MSG_GET_FCOE_STATS: case MFW_DRV_MSG_GET_ISCSI_STATS: case MFW_DRV_MSG_GET_RDMA_STATS: qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i); break; case MFW_DRV_MSG_BW_UPDATE: qed_mcp_update_bw(p_hwfn, p_ptt); break; case MFW_DRV_MSG_S_TAG_UPDATE: qed_mcp_update_stag(p_hwfn, p_ptt); break; case MFW_DRV_MSG_FAILURE_DETECTED: qed_mcp_handle_fan_failure(p_hwfn, p_ptt); break; case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: qed_mcp_handle_critical_error(p_hwfn, p_ptt); break; case MFW_DRV_MSG_GET_TLV_REQ: qed_mfw_tlv_req(p_hwfn); break; default: DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); rc = -EINVAL; } clear_bit(QED_MCP_IN_PROCESSING_BIT, &p_hwfn->mcp_info->mcp_handling_status); } /* ACK everything */ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]); /* MFW expect answer in BE, so we force write in that format */ qed_wr(p_hwfn, p_ptt, info->mfw_mb_addr + sizeof(u32) + MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * sizeof(u32) + i * sizeof(u32), (__force u32)val); } if (!found) { DP_NOTICE(p_hwfn, "Received an MFW message indication but no new message!\n"); rc = -EINVAL; } /* Copy the new mfw messages into the shadow */ memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); return rc; } int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id) { u32 global_offsize, public_base; if (IS_VF(p_hwfn->cdev)) { if (p_hwfn->vf_iov_info) { struct pfvf_acquire_resp_tlv *p_resp; p_resp = &p_hwfn->vf_iov_info->acquire_resp; *p_mfw_ver = p_resp->pfdev_info.mfw_ver; return 0; } else { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF requested MFW version prior to ACQUIRE\n"); return -EINVAL; } } public_base = p_hwfn->mcp_info->public_base; global_offsize = qed_rd(p_hwfn, p_ptt, SECTION_OFFSIZE_ADDR(public_base, PUBLIC_GLOBAL)); *p_mfw_ver = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, mfw_ver)); if 
(p_running_bundle_id) { *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, running_bundle_id)); } return 0; } int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mbi_ver) { u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr; if (IS_VF(p_hwfn->cdev)) return -EINVAL; /* Read the address of the nvm_cfg */ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); if (!nvm_cfg_addr) { DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); return -EINVAL; } /* Read the offset of nvm_cfg1 */ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, mbi_version); *p_mbi_ver = qed_rd(p_hwfn, p_ptt, mbi_ver_addr) & (NVM_CFG1_GLOB_MBI_VERSION_0_MASK | NVM_CFG1_GLOB_MBI_VERSION_1_MASK | NVM_CFG1_GLOB_MBI_VERSION_2_MASK); return 0; } int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_media_type) { *p_media_type = MEDIA_UNSPECIFIED; if (IS_VF(p_hwfn->cdev)) return -EINVAL; if (!qed_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; } if (!p_ptt) { *p_media_type = MEDIA_UNSPECIFIED; return -EINVAL; } *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, media_type)); return 0; } int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_transceiver_state, u32 *p_transceiver_type) { u32 transceiver_info; *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE; *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING; if (IS_VF(p_hwfn->cdev)) return -EINVAL; if (!qed_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; } transceiver_info = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + offsetof(struct public_port, transceiver_data)); *p_transceiver_state = (transceiver_info & ETH_TRANSCEIVER_STATE_MASK) >> ETH_TRANSCEIVER_STATE_OFFSET; if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) *p_transceiver_type = (transceiver_info & ETH_TRANSCEIVER_TYPE_MASK) >> ETH_TRANSCEIVER_TYPE_OFFSET; else *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN; return 0; } static bool qed_is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) { if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) return true; return false; } int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_speed_mask) { u32 transceiver_type, transceiver_state; int ret; ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, &transceiver_type); if (ret) return ret; if (qed_is_transceiver_ready(transceiver_state, transceiver_type) == false) return -EINVAL; switch (transceiver_type) { case ETH_TRANSCEIVER_TYPE_1G_LX: case ETH_TRANSCEIVER_TYPE_1G_SX: case ETH_TRANSCEIVER_TYPE_1G_PCC: case ETH_TRANSCEIVER_TYPE_1G_ACC: case ETH_TRANSCEIVER_TYPE_1000BASET: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; case ETH_TRANSCEIVER_TYPE_10G_SR: case ETH_TRANSCEIVER_TYPE_10G_LR: case ETH_TRANSCEIVER_TYPE_10G_LRM: case ETH_TRANSCEIVER_TYPE_10G_ER: case ETH_TRANSCEIVER_TYPE_10G_PCC: case ETH_TRANSCEIVER_TYPE_10G_ACC: case ETH_TRANSCEIVER_TYPE_4x10G: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; break; case ETH_TRANSCEIVER_TYPE_40G_LR4: case ETH_TRANSCEIVER_TYPE_40G_SR4: case 
ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; break; case ETH_TRANSCEIVER_TYPE_100G_AOC: case ETH_TRANSCEIVER_TYPE_100G_SR4: case ETH_TRANSCEIVER_TYPE_100G_LR4: case ETH_TRANSCEIVER_TYPE_100G_ER4: case ETH_TRANSCEIVER_TYPE_100G_ACC: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; break; case ETH_TRANSCEIVER_TYPE_25G_SR: case ETH_TRANSCEIVER_TYPE_25G_LR: case ETH_TRANSCEIVER_TYPE_25G_AOC: case ETH_TRANSCEIVER_TYPE_25G_ACC_S: case ETH_TRANSCEIVER_TYPE_25G_ACC_M: case ETH_TRANSCEIVER_TYPE_25G_ACC_L: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; break; case ETH_TRANSCEIVER_TYPE_25G_CA_N: case ETH_TRANSCEIVER_TYPE_25G_CA_S: case ETH_TRANSCEIVER_TYPE_25G_CA_L: case ETH_TRANSCEIVER_TYPE_4x25G_CR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; break; case ETH_TRANSCEIVER_TYPE_40G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; case ETH_TRANSCEIVER_TYPE_100G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; break; case ETH_TRANSCEIVER_TYPE_XLPPI: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; break; case ETH_TRANSCEIVER_TYPE_10G_BASET: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; default: DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n", transceiver_type); *p_speed_mask = 0xff; break; } return 0; } int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_board_config) { u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; if (IS_VF(p_hwfn->cdev)) return -EINVAL; if (!qed_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; } if (!p_ptt) { *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; return -EINVAL; } nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); *p_board_config = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, board_cfg)); return 0; } /* Old MFW has a global configuration 
for all PFs regarding RDMA support */ static void qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, enum qed_pci_personality *p_proto) { /* There wasn't ever a legacy MFW that published iwarp. * So at this point, this is either plain l2 or RoCE. */ if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities)) *p_proto = QED_PCI_ETH_ROCE; else *p_proto = QED_PCI_ETH; DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to Legacy capabilities, L2 personality is %08x\n", (u32)*p_proto); } static int qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_pci_personality *p_proto) { u32 resp = 0, param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param); if (rc) return rc; if (resp != FW_MSG_CODE_OK) { DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "MFW lacks support for command; Returns %08x\n", resp); return -EINVAL; } switch (param) { case FW_MB_PARAM_GET_PF_RDMA_NONE: *p_proto = QED_PCI_ETH; break; case FW_MB_PARAM_GET_PF_RDMA_ROCE: *p_proto = QED_PCI_ETH_ROCE; break; case FW_MB_PARAM_GET_PF_RDMA_IWARP: *p_proto = QED_PCI_ETH_IWARP; break; case FW_MB_PARAM_GET_PF_RDMA_BOTH: *p_proto = QED_PCI_ETH_RDMA; break; default: DP_NOTICE(p_hwfn, "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n", param); return -EINVAL; } DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", (u32)*p_proto, resp, param); return 0; } static int qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, struct public_func *p_info, struct qed_ptt *p_ptt, enum qed_pci_personality *p_proto) { int rc = 0; switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { case FUNC_MF_CFG_PROTOCOL_ETHERNET: if (!IS_ENABLED(CONFIG_QED_RDMA)) *p_proto = QED_PCI_ETH; else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); break; case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = QED_PCI_ISCSI; break; case FUNC_MF_CFG_PROTOCOL_FCOE: *p_proto = QED_PCI_FCOE; break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); fallthrough; default: rc = -EINVAL; } return rc; } int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_function_info *info; struct public_func shmem_info; qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); info = &p_hwfn->mcp_info->func_info; info->pause_on_host = (shmem_info.config & FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 
1 : 0; if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, &info->protocol)) { DP_ERR(p_hwfn, "Unknown personality %08x\n", (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); return -EINVAL; } qed_read_pf_bandwidth(p_hwfn, &shmem_info); if (shmem_info.mac_upper || shmem_info.mac_lower) { info->mac[0] = (u8)(shmem_info.mac_upper >> 8); info->mac[1] = (u8)(shmem_info.mac_upper); info->mac[2] = (u8)(shmem_info.mac_lower >> 24); info->mac[3] = (u8)(shmem_info.mac_lower >> 16); info->mac[4] = (u8)(shmem_info.mac_lower >> 8); info->mac[5] = (u8)(shmem_info.mac_lower); /* Store primary MAC for later possible WoL */ memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN); } else { DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); } info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower | (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32); info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower | (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32); info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); info->mtu = (u16)shmem_info.mtu_size; p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE; p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT; if (qed_mcp_is_init(p_hwfn)) { u32 resp = 0, param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OS_WOL, 0, &resp, &param); if (rc) return rc; if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED) p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME; } DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n", info->pause_on_host, info->protocol, info->bandwidth_min, info->bandwidth_max, info->mac, info->wwn_port, info->wwn_node, info->ovlan, (u8)p_hwfn->hw_info.b_wol_support); return 0; } struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn) { if (!p_hwfn || !p_hwfn->mcp_info) return NULL; return &p_hwfn->mcp_info->link_input; } struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn) { if (!p_hwfn || !p_hwfn->mcp_info) return NULL; return &p_hwfn->mcp_info->link_output; } struct qed_mcp_link_capabilities *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn) { if (!p_hwfn || !p_hwfn->mcp_info) return NULL; return &p_hwfn->mcp_info->link_capabilities; } int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resp = 0, param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param); /* Wait for the drain to complete before returning */ msleep(1020); return rc; } int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_flash_size) { u32 flash_size; if (IS_VF(p_hwfn->cdev)) return -EINVAL; flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); *p_flash_size = flash_size; return 0; } int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dev *cdev = p_hwfn->cdev; if (cdev->recov_in_prog) { DP_NOTICE(p_hwfn, "Avoid triggering a recovery since such a process is already in progress\n"); return -EAGAIN; } DP_NOTICE(p_hwfn, "Triggering a recovery process\n"); qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); return 0; } #define QED_RECOVERY_PROLOG_SLEEP_MS 100 int qed_recovery_prolog(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; int rc; /* Allow 
ongoing PCIe transactions to complete */ msleep(QED_RECOVERY_PROLOG_SLEEP_MS); /* Clear the PF's internal FID_enable in the PXP */ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); if (rc) DP_NOTICE(p_hwfn, "qed_pglueb_set_pfid_enable() failed. rc = %d.\n", rc); return rc; } static int qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num) { u32 resp = 0, param = 0, rc_param = 0; int rc; /* Only Leader can configure MSIX, and need to take CMT into account */ if (!IS_LEAD_HWFN(p_hwfn)) return 0; num *= p_hwfn->cdev->num_hwfns; param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) & DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) & DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, &resp, &rc_param); if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id); rc = -EINVAL; } else { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", num, vf_id); } return rc; } static int qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 num) { u32 resp = 0, param = num, rc_param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, param, &resp, &rc_param); if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n"); rc = -EINVAL; } else { DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Requested 0x%02x MSI-x interrupts for VFs\n", num); } return rc; } int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num) { if (QED_IS_BB(p_hwfn->cdev)) return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); else return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); } int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_drv_version *p_ver) { struct qed_mcp_mb_params mb_params; struct drv_version_stc drv_version; __be32 val; u32 i; int rc; memset(&drv_version, 0, sizeof(drv_version)); drv_version.version = p_ver->version; for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) { val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)])); *(__be32 *)&drv_version.name[i * sizeof(u32)] = val; } memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_SET_VERSION; mb_params.p_data_src = &drv_version; mb_params.data_src_size = sizeof(drv_version); rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc; } /* A maximal 100 msec waiting time for the MCP to halt */ #define QED_MCP_HALT_SLEEP_MS 10 #define QED_MCP_HALT_MAX_RETRIES 10 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resp = 0, param = 0, cpu_state, cnt = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, &param); if (rc) { DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc; } do { msleep(QED_MCP_HALT_SLEEP_MS); cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) break; } while (++cnt < QED_MCP_HALT_MAX_RETRIES); if (cnt == QED_MCP_HALT_MAX_RETRIES) { DP_NOTICE(p_hwfn, "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state); return -EBUSY; } qed_mcp_cmd_set_blocking(p_hwfn, true); return 0; } #define QED_MCP_RESUME_SLEEP_MS 10 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 cpu_mode, cpu_state; qed_wr(p_hwfn, 
p_ptt, MCP_REG_CPU_STATE, 0xffffffff); cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT; qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode); msleep(QED_MCP_RESUME_SLEEP_MS); cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) { DP_NOTICE(p_hwfn, "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n", cpu_mode, cpu_state); return -EBUSY; } qed_mcp_cmd_set_blocking(p_hwfn, false); return 0; } int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_client client) { u32 resp = 0, param = 0; u32 drv_mb_param; int rc; switch (client) { case QED_OV_CLIENT_DRV: drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; break; case QED_OV_CLIENT_USER: drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; break; case QED_OV_CLIENT_VENDOR_SPEC: drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC; break; default: DP_NOTICE(p_hwfn, "Invalid client type %d\n", client); return -EINVAL; } rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, drv_mb_param, &resp, &param); if (rc) DP_ERR(p_hwfn, "MCP response failure, aborting\n"); return rc; } int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_driver_state drv_state) { u32 resp = 0, param = 0; u32 drv_mb_param; int rc; switch (drv_state) { case QED_OV_DRIVER_STATE_NOT_LOADED: drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; break; case QED_OV_DRIVER_STATE_DISABLED: drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; break; case QED_OV_DRIVER_STATE_ACTIVE: drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; break; default: DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state); return -EINVAL; } rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, drv_mb_param, &resp, &param); if (rc) DP_ERR(p_hwfn, "Failed to send driver state\n"); return rc; } int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 mtu) { u32 resp = 0, param = 0; u32 drv_mb_param; int rc; drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU, drv_mb_param, &resp, &param); if (rc) DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc); return rc; } int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, const u8 *mac) { struct qed_mcp_mb_params mb_params; u32 mfw_mac[2]; int rc; memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_SET_VMAC; mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC << DRV_MSG_CODE_VMAC_TYPE_SHIFT; mb_params.param |= MCP_PF_ID(p_hwfn); /* MCP is BE, and on LE platforms PCI would swap access to SHMEM * in 32-bit granularity. * So the MAC has to be set in native order [and not byte order], * otherwise it would be read incorrectly by MFW after swap. 
*/ mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3]; mfw_mac[1] = mac[4] << 24 | mac[5] << 16; mb_params.p_data_src = (u8 *)mfw_mac; mb_params.data_src_size = 8; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc); /* Store primary MAC for later possible WoL */ memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN); return rc; } int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_wol wol) { u32 resp = 0, param = 0; u32 drv_mb_param; int rc; if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) { DP_VERBOSE(p_hwfn, QED_MSG_SP, "Can't change WoL configuration when WoL isn't supported\n"); return -EINVAL; } switch (wol) { case QED_OV_WOL_DEFAULT: drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT; break; case QED_OV_WOL_DISABLED: drv_mb_param = DRV_MB_PARAM_WOL_DISABLED; break; case QED_OV_WOL_ENABLED: drv_mb_param = DRV_MB_PARAM_WOL_ENABLED; break; default: DP_ERR(p_hwfn, "Invalid wol state %d\n", wol); return -EINVAL; } rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL, drv_mb_param, &resp, &param); if (rc) DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc); /* Store the WoL update for a future unload */ p_hwfn->cdev->wol_config = (u8)wol; return rc; } int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_eswitch eswitch) { u32 resp = 0, param = 0; u32 drv_mb_param; int rc; switch (eswitch) { case QED_OV_ESWITCH_NONE: drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE; break; case QED_OV_ESWITCH_VEB: drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB; break; case QED_OV_ESWITCH_VEPA: drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA; break; default: DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch); return -EINVAL; } rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE, drv_mb_param, &resp, &param); if (rc) DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc); return rc; } int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode) { u32 resp = 0, param = 0, drv_mb_param; int rc; switch (mode) { case QED_LED_MODE_ON: drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; break; case QED_LED_MODE_OFF: drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; break; case QED_LED_MODE_RESTORE: drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; break; default: DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode); return -EINVAL; } rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, drv_mb_param, &resp, &param); return rc; } int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 mask_parities) { u32 resp = 0, param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, mask_parities, &resp, &param); if (rc) { DP_ERR(p_hwfn, "MCP response failure for mask parities, aborting\n"); } else if (resp != FW_MSG_CODE_OK) { DP_ERR(p_hwfn, "MCP did not acknowledge mask parity request. 
Old MFW?\n"); rc = -EINVAL; } return rc; } int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) { u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0; struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); u32 resp = 0, resp_param = 0; struct qed_ptt *p_ptt; int rc = 0; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EBUSY; while (bytes_left > 0) { bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN); rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM, addr + offset + (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_OFFSET), &resp, &resp_param, &read_len, (u32 *)(p_buf + offset), false); if (rc || (resp != FW_MSG_CODE_NVM_OK)) { DP_NOTICE(cdev, "MCP command rc = %d\n", rc); break; } /* This can be a lengthy process, and it's possible scheduler * isn't preemptible. Sleep a bit to prevent CPU hogging. */ if (bytes_left % 0x1000 < (bytes_left - read_len) % 0x1000) usleep_range(1000, 2000); offset += read_len; bytes_left -= read_len; } cdev->mcp_nvm_resp = resp; qed_ptt_release(p_hwfn, p_ptt); return rc; } int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EBUSY; memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp)); qed_ptt_release(p_hwfn, p_ptt); return 0; } int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len) { u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param; struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt; int rc = -EINVAL; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) return -EBUSY; switch (cmd) { case QED_PUT_FILE_BEGIN: nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN; break; case QED_PUT_FILE_DATA: nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; break; case QED_NVM_WRITE_NVRAM: nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; break; default: DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd); rc = -EINVAL; goto out; } buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); while (buf_idx < len) { if (cmd == QED_PUT_FILE_BEGIN) nvm_offset = addr; else nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) + buf_idx; rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, &resp, &param, buf_size, (u32 *)&p_buf[buf_idx]); if (rc) { DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc); resp = FW_MSG_CODE_ERROR; break; } if (resp != FW_MSG_CODE_OK && resp != FW_MSG_CODE_NVM_OK && resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { DP_NOTICE(cdev, "nvm write failed, resp = 0x%08x\n", resp); rc = -EINVAL; break; } /* This can be a lengthy process, and it's possible scheduler * isn't pre-emptable. Sleep a bit to prevent CPU hogging. */ if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000) usleep_range(1000, 2000); /* For MBI upgrade, MFW response includes the next buffer offset * to be delivered to MFW. 
*/ if (param && cmd == QED_PUT_FILE_DATA) { buf_idx = QED_MFW_GET_FIELD(param, FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); buf_size = QED_MFW_GET_FIELD(param, FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); } else { buf_idx += buf_size; buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); } } cdev->mcp_nvm_resp = resp; out: qed_ptt_release(p_hwfn, p_ptt); return rc; } int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) { u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0; u32 resp, param; int rc; nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) & DRV_MB_PARAM_TRANSCEIVER_PORT_MASK; nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) & DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK; addr = offset; offset = 0; bytes_left = len; while (bytes_left > 0) { bytes_to_copy = min_t(u32, bytes_left, MAX_I2C_TRANSACTION_SIZE); nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); nvm_offset |= ((addr + offset) << DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) & DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK; nvm_offset |= (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) & DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK; rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_TRANSCEIVER_READ, nvm_offset, &resp, &param, &buf_size, (u32 *)(p_buf + offset), true); if (rc) { DP_NOTICE(p_hwfn, "Failed to send a transceiver read command to the MFW. rc = %d.\n", rc); return rc; } if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) return -ENODEV; else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) return -EINVAL; offset += buf_size; bytes_left -= buf_size; } return 0; } int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 drv_mb_param = 0, rsp, param; int rc = 0; drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, drv_mb_param, &rsp, &param); if (rc) return rc; if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || (param != DRV_MB_PARAM_BIST_RC_PASSED)) rc = -EAGAIN; return rc; } int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 drv_mb_param, rsp, param; int rc = 0; drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, drv_mb_param, &rsp, &param); if (rc) return rc; if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || (param != DRV_MB_PARAM_BIST_RC_PASSED)) rc = -EAGAIN; return rc; } int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *num_images) { u32 drv_mb_param = 0, rsp; int rc = 0; drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, drv_mb_param, &rsp, num_images); if (rc) return rc; if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) rc = -EINVAL; return rc; } int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct bist_nvm_image_att *p_image_att, u32 image_index) { u32 buf_size = 0, param, resp = 0, resp_param = 0; int rc; param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT; param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT; rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, param, &resp, &resp_param, &buf_size, (u32 *)p_image_att, false); if (rc) return rc; if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || (p_image_att->return_code != 
1)) rc = -EINVAL; return rc; } int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) { struct qed_nvm_image_info nvm_info; struct qed_ptt *p_ptt; int rc; u32 i; if (p_hwfn->nvm_info.valid) return 0; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_ERR(p_hwfn, "failed to acquire ptt\n"); return -EBUSY; } /* Acquire from MFW the amount of available images */ nvm_info.num_images = 0; rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, p_ptt, &nvm_info.num_images); if (rc == -EOPNOTSUPP) { DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); goto out; } else if (rc || !nvm_info.num_images) { DP_ERR(p_hwfn, "Failed getting number of images\n"); goto err0; } nvm_info.image_att = kmalloc_array(nvm_info.num_images, sizeof(struct bist_nvm_image_att), GFP_KERNEL); if (!nvm_info.image_att) { rc = -ENOMEM; goto err0; } /* Iterate over images and get their attributes */ for (i = 0; i < nvm_info.num_images; i++) { rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, &nvm_info.image_att[i], i); if (rc) { DP_ERR(p_hwfn, "Failed getting image index %d attributes\n", i); goto err1; } DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, nvm_info.image_att[i].len); } out: /* Update hwfn's nvm_info */ if (nvm_info.num_images) { p_hwfn->nvm_info.num_images = nvm_info.num_images; kfree(p_hwfn->nvm_info.image_att); p_hwfn->nvm_info.image_att = nvm_info.image_att; p_hwfn->nvm_info.valid = true; } qed_ptt_release(p_hwfn, p_ptt); return 0; err1: kfree(nvm_info.image_att); err0: qed_ptt_release(p_hwfn, p_ptt); return rc; } void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->nvm_info.image_att); p_hwfn->nvm_info.image_att = NULL; p_hwfn->nvm_info.valid = false; } int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, struct qed_nvm_image_att *p_image_att) { enum nvm_image_type type; int rc; u32 i; /* Translate image_id into MFW definitions */ switch (image_id) { case QED_NVM_IMAGE_ISCSI_CFG: type = NVM_TYPE_ISCSI_CFG; break; case QED_NVM_IMAGE_FCOE_CFG: type = NVM_TYPE_FCOE_CFG; break; case QED_NVM_IMAGE_MDUMP: type = NVM_TYPE_MDUMP; break; case QED_NVM_IMAGE_NVM_CFG1: type = NVM_TYPE_NVM_CFG1; break; case QED_NVM_IMAGE_DEFAULT_CFG: type = NVM_TYPE_DEFAULT_CFG; break; case QED_NVM_IMAGE_NVM_META: type = NVM_TYPE_NVM_META; break; default: DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", image_id); return -EINVAL; } rc = qed_mcp_nvm_info_populate(p_hwfn); if (rc) return rc; for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (type == p_hwfn->nvm_info.image_att[i].image_type) break; if (i == p_hwfn->nvm_info.num_images) { DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, "Failed to find nvram image of type %08x\n", image_id); return -ENOENT; } p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; p_image_att->length = p_hwfn->nvm_info.image_att[i].len; return 0; } int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len) { struct qed_nvm_image_att image_att; int rc; memset(p_buffer, 0, buffer_len); rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); if (rc) return rc; /* Validate sizes - both the image's and the supplied buffer's */ if (image_att.length <= 4) { DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, "Image [%d] is too small - only %d bytes\n", image_id, image_att.length); return -EINVAL; } if (image_att.length > buffer_len) { DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, "Image [%d] is too big - %08x bytes where only %08x are available\n", image_id, image_att.length, buffer_len); return -ENOMEM; } 
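	/* Both the image length and the caller's buffer size have been
	 * validated at this point, so read the image contents starting at
	 * image_att.start_addr; qed_mcp_nvm_read() above pulls the data from
	 * the MFW in MCP_DRV_NVM_BUF_LEN-sized chunks.
	 */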
return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr, p_buffer, image_att.length); } static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id) { enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; switch (res_id) { case QED_SB: mfw_res_id = RESOURCE_NUM_SB_E; break; case QED_L2_QUEUE: mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; break; case QED_VPORT: mfw_res_id = RESOURCE_NUM_VPORT_E; break; case QED_RSS_ENG: mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; break; case QED_PQ: mfw_res_id = RESOURCE_NUM_PQ_E; break; case QED_RL: mfw_res_id = RESOURCE_NUM_RL_E; break; case QED_MAC: case QED_VLAN: /* Each VFC resource can accommodate both a MAC and a VLAN */ mfw_res_id = RESOURCE_VFC_FILTER_E; break; case QED_ILT: mfw_res_id = RESOURCE_ILT_E; break; case QED_LL2_RAM_QUEUE: mfw_res_id = RESOURCE_LL2_QUEUE_E; break; case QED_LL2_CTX_QUEUE: mfw_res_id = RESOURCE_LL2_CQS_E; break; case QED_RDMA_CNQ_RAM: case QED_CMDQS_CQS: /* CNQ/CMDQS are the same resource */ mfw_res_id = RESOURCE_CQS_E; break; case QED_RDMA_STATS_QUEUE: mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; break; case QED_BDQ: mfw_res_id = RESOURCE_BDQ_E; break; default: break; } return mfw_res_id; } #define QED_RESC_ALLOC_VERSION_MAJOR 2 #define QED_RESC_ALLOC_VERSION_MINOR 0 #define QED_RESC_ALLOC_VERSION \ ((QED_RESC_ALLOC_VERSION_MAJOR << \ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \ (QED_RESC_ALLOC_VERSION_MINOR << \ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT)) struct qed_resc_alloc_in_params { u32 cmd; enum qed_resources res_id; u32 resc_max_val; }; struct qed_resc_alloc_out_params { u32 mcp_resp; u32 mcp_param; u32 resc_num; u32 resc_start; u32 vf_resc_num; u32 vf_resc_start; u32 flags; }; static int qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_resc_alloc_in_params *p_in_params, struct qed_resc_alloc_out_params *p_out_params) { struct qed_mcp_mb_params mb_params; struct resource_info mfw_resc_info; int rc; memset(&mfw_resc_info, 0, sizeof(mfw_resc_info)); mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id); if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { DP_ERR(p_hwfn, "Failed to match resource %d [%s] with the MFW resources\n", p_in_params->res_id, qed_hw_get_resc_name(p_in_params->res_id)); return -EINVAL; } switch (p_in_params->cmd) { case DRV_MSG_SET_RESOURCE_VALUE_MSG: mfw_resc_info.size = p_in_params->resc_max_val; fallthrough; case DRV_MSG_GET_RESOURCE_ALLOC_MSG: break; default: DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", p_in_params->cmd); return -EINVAL; } memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = p_in_params->cmd; mb_params.param = QED_RESC_ALLOC_VERSION; mb_params.p_data_src = &mfw_resc_info; mb_params.data_src_size = sizeof(mfw_resc_info); mb_params.p_data_dst = mb_params.p_data_src; mb_params.data_dst_size = mb_params.data_src_size; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", p_in_params->cmd, p_in_params->res_id, qed_hw_get_resc_name(p_in_params->res_id), QED_MFW_GET_FIELD(mb_params.param, DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), QED_MFW_GET_FIELD(mb_params.param, DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), p_in_params->resc_max_val); rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; p_out_params->mcp_resp = mb_params.mcp_resp; p_out_params->mcp_param = mb_params.mcp_param; p_out_params->resc_num = mfw_resc_info.size; p_out_params->resc_start = mfw_resc_info.offset; p_out_params->vf_resc_num = 
mfw_resc_info.vf_size; p_out_params->vf_resc_start = mfw_resc_info.vf_offset; p_out_params->flags = mfw_resc_info.flags; DP_VERBOSE(p_hwfn, QED_MSG_SP, "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", QED_MFW_GET_FIELD(p_out_params->mcp_param, FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), QED_MFW_GET_FIELD(p_out_params->mcp_param, FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), p_out_params->resc_num, p_out_params->resc_start, p_out_params->vf_resc_num, p_out_params->vf_resc_start, p_out_params->flags); return 0; } int qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_resources res_id, u32 resc_max_val, u32 *p_mcp_resp) { struct qed_resc_alloc_out_params out_params; struct qed_resc_alloc_in_params in_params; int rc; memset(&in_params, 0, sizeof(in_params)); in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; in_params.res_id = res_id; in_params.resc_max_val = resc_max_val; memset(&out_params, 0, sizeof(out_params)); rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, &out_params); if (rc) return rc; *p_mcp_resp = out_params.mcp_resp; return 0; } int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_resources res_id, u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start) { struct qed_resc_alloc_out_params out_params; struct qed_resc_alloc_in_params in_params; int rc; memset(&in_params, 0, sizeof(in_params)); in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; in_params.res_id = res_id; memset(&out_params, 0, sizeof(out_params)); rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, &out_params); if (rc) return rc; *p_mcp_resp = out_params.mcp_resp; if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { *p_resc_num = out_params.resc_num; *p_resc_start = out_params.resc_start; } return 0; } int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp, mcp_param; return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, &mcp_resp, &mcp_param); } static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 param, u32 *p_mcp_resp, u32 *p_mcp_param) { int rc; rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param, p_mcp_resp, p_mcp_param); if (rc) return rc; if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The resource command is unsupported by the MFW\n"); return -EINVAL; } if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE); DP_NOTICE(p_hwfn, "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", param, opcode); return -EINVAL; } return rc; } static int __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params) { u32 param = 0, mcp_resp, mcp_param; u8 opcode; int rc; switch (p_params->timeout) { case QED_MCP_RESC_LOCK_TO_DEFAULT: opcode = RESOURCE_OPCODE_REQ; p_params->timeout = 0; break; case QED_MCP_RESC_LOCK_TO_NONE: opcode = RESOURCE_OPCODE_REQ_WO_AGING; p_params->timeout = 0; break; default: opcode = RESOURCE_OPCODE_REQ_W_AGING; break; } QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout); DP_VERBOSE(p_hwfn, QED_MSG_SP, "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n", param, p_params->timeout, opcode, p_params->resource); /* Attempt to acquire the resource */ rc = 
qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); if (rc) return rc; /* Analyze the response */ p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER); opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); DP_VERBOSE(p_hwfn, QED_MSG_SP, "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n", mcp_param, opcode, p_params->owner); switch (opcode) { case RESOURCE_OPCODE_GNT: p_params->b_granted = true; break; case RESOURCE_OPCODE_BUSY: p_params->b_granted = false; break; default: DP_NOTICE(p_hwfn, "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n", mcp_param, opcode); return -EINVAL; } return 0; } int qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params) { u32 retry_cnt = 0; int rc; do { /* No need for an interval before the first iteration */ if (retry_cnt) { if (p_params->sleep_b4_retry) { u16 retry_interval_in_ms = DIV_ROUND_UP(p_params->retry_interval, 1000); msleep(retry_interval_in_ms); } else { udelay(p_params->retry_interval); } } rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params); if (rc) return rc; if (p_params->b_granted) break; } while (retry_cnt++ < p_params->retry_num); return 0; } int qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_resc_unlock_params *p_params) { u32 param = 0, mcp_resp, mcp_param; u8 opcode; int rc; opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE : RESOURCE_OPCODE_RELEASE; QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); DP_VERBOSE(p_hwfn, QED_MSG_SP, "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n", param, opcode, p_params->resource); /* Attempt to release the resource */ rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); if (rc) return rc; /* Analyze the response */ opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); DP_VERBOSE(p_hwfn, QED_MSG_SP, "Resource unlock response: mcp_param 0x%08x [opcode %d]\n", mcp_param, opcode); switch (opcode) { case RESOURCE_OPCODE_RELEASED_PREVIOUS: DP_INFO(p_hwfn, "Resource unlock request for an already released resource [%d]\n", p_params->resource); fallthrough; case RESOURCE_OPCODE_RELEASED: p_params->b_released = true; break; case RESOURCE_OPCODE_WRONG_OWNER: p_params->b_released = false; break; default: DP_NOTICE(p_hwfn, "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n", mcp_param, opcode); return -EINVAL; } return 0; } void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, enum qed_resc_lock resource, bool b_is_permanent) { if (p_lock) { memset(p_lock, 0, sizeof(*p_lock)); /* Permanent resources don't require aging, and there's no * point in trying to acquire them more than once since it's * unexpected another entity would release them. 
*/ if (b_is_permanent) { p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE; } else { p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT; p_lock->retry_interval = QED_MCP_RESC_LOCK_RETRY_VAL_DFLT; p_lock->sleep_b4_retry = true; } p_lock->resource = resource; } if (p_unlock) { memset(p_unlock, 0, sizeof(*p_unlock)); p_unlock->resource = resource; } } bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn) { return !!(p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); } int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); if (!rc) DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE), "MFW supported features: %08x\n", p_hwfn->mcp_info->capabilities); return rc; } int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp, mcp_param, features; features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); } int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_mb_params mb_params = {0}; struct qed_dev *cdev = p_hwfn->cdev; u8 fir_valid, l2_valid; int rc; mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The get_engine_config command is unsupported by the MFW\n"); return -EOPNOTSUPP; } fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param, FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID); if (fir_valid) cdev->fir_affin = QED_MFW_GET_FIELD(mb_params.mcp_param, FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE); l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param, FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID); if (l2_valid) cdev->l2_affin_hint = QED_MFW_GET_FIELD(mb_params.mcp_param, FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE); DP_INFO(p_hwfn, "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n", fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint); return 0; } int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_mb_params mb_params = {0}; struct qed_dev *cdev = p_hwfn->cdev; int rc; mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The get_ppfid_bitmap command is unsupported by the MFW\n"); return -EOPNOTSUPP; } cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param, FW_MB_PARAM_PPFID_BITMAP); DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n", cdev->ppfid_bitmap); return 0; } int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, u32 *p_len) { u32 mb_param = 0, resp, param; int rc; QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); if (flags & QED_NVM_CFG_OPTION_INIT) QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); if (flags & QED_NVM_CFG_OPTION_FREE) QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, entity_id); } rc = qed_mcp_nvm_rd_cmd(p_hwfn, 
p_ptt, DRV_MSG_CODE_GET_NVM_CFG_OPTION, mb_param, &resp, &param, p_len, (u32 *)p_buf, false); return rc; } int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, u32 len) { u32 mb_param = 0, resp, param; QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); if (flags & QED_NVM_CFG_OPTION_ALL) QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1); if (flags & QED_NVM_CFG_OPTION_INIT) QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); if (flags & QED_NVM_CFG_OPTION_COMMIT) QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1); if (flags & QED_NVM_CFG_OPTION_FREE) QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, entity_id); } return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_NVM_CFG_OPTION, mb_param, &resp, &param, len, (u32 *)p_buf); } #define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN #define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32) #define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \ (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE) static int __qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 *p_buf, u8 size) { struct qed_mcp_mb_params mb_params; int rc; if (size > QED_MCP_DBG_DATA_MAX_SIZE) { DP_ERR(p_hwfn, "Debug data size is %d while it should not exceed %d\n", size, QED_MCP_DBG_DATA_MAX_SIZE); return -EINVAL; } memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND; SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size); mb_params.p_data_src = p_buf; mb_params.data_src_size = size; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) return rc; if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is unsupported by the MFW\n"); return -EOPNOTSUPP; } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) { DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n"); return -EBUSY; } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) { DP_NOTICE(p_hwfn, "Failed to send debug data to the MFW [resp 0x%08x]\n", mb_params.mcp_resp); return -EINVAL; } return 0; } enum qed_mcp_dbg_data_type { QED_MCP_DBG_DATA_TYPE_RAW, }; /* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */ #define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0 #define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff #define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12 #define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000 #define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20 #define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000 #define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28 #define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000 #define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1 #define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2 static int qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size) { u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf; u32 tmp_size = size, *p_header, *p_payload; u8 flags = 0; u16 seq; int rc; p_header = (u32 *)raw_data; p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE); seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq); /* First chunk is marked as 'first' */ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST; *p_header = 0; SET_MFW_FIELD(*p_header, 
QED_MCP_DBG_DATA_HDR_SN, seq); SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type); SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags); SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id); while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) { memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE); rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data, QED_MCP_DBG_DATA_MAX_SIZE); if (rc) return rc; /* Clear the 'first' marking after sending the first chunk */ if (p_tmp_buf == p_buf) { flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST; SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags); } p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE; tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE; } /* Last chunk is marked as 'last' */ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST; SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags); memcpy(p_payload, p_tmp_buf, tmp_size); /* Casting the left size to u8 is ok since at this point it is <= 32 */ return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data, (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE + tmp_size)); } int qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 *p_buf, u32 size) { return qed_mcp_send_debug_data(p_hwfn, p_ptt, QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size); } bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn) { return !!(p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK); } int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active) { u32 resp = 0, param = 0; int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, &param); if (rc) { DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc); return rc; } *active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED); return 0; }
linux-master
drivers/net/ethernet/qlogic/qed/qed_mcp.c
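/*
 * Illustrative sketch, not part of the qed driver: a minimal user-space
 * restatement of the MAC packing performed by qed_mcp_ov_update_mac() in
 * qed_mcp.c above.  The driver's comment explains that the MCP is big-endian
 * and that PCI access to SHMEM is swapped in 32-bit granularity on LE hosts,
 * so the MAC bytes are placed into two 32-bit words in "native order" rather
 * than being byte-swapped.  The MAC value in main() is a made-up example and
 * the file/function names here are hypothetical; build with any C compiler,
 * e.g. "cc -o pack_mac pack_mac.c".
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void pack_mac_for_mfw(const uint8_t mac[6], uint32_t mfw_mac[2])
{
	/* Same layout as the driver: mac[0] lands in the most significant
	 * byte of the first word, mac[5] in bits [23:16] of the second word,
	 * and the low 16 bits of the second word remain zero.
	 */
	mfw_mac[0] = (uint32_t)mac[0] << 24 | (uint32_t)mac[1] << 16 |
		     (uint32_t)mac[2] << 8 | (uint32_t)mac[3];
	mfw_mac[1] = (uint32_t)mac[4] << 24 | (uint32_t)mac[5] << 16;
}

int main(void)
{
	/* Hypothetical locally administered MAC, for demonstration only */
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t mfw_mac[2];

	pack_mac_for_mfw(mac, mfw_mac);
	printf("mfw_mac[0]=0x%08" PRIx32 " mfw_mac[1]=0x%08" PRIx32 "\n",
	       mfw_mac[0], mfw_mac[1]);
	return 0;
}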
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <net/addrconf.h> #include "qed.h" #include "qed_cxt.h" #include "qed_hsi.h" #include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include <linux/qed/qed_rdma_if.h> #include "qed_rdma.h" #include "qed_roce.h" #include "qed_sp.h" int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 max_count, char *name) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); bmap->max_count = max_count; bmap->bitmap = bitmap_zalloc(max_count, GFP_KERNEL); if (!bmap->bitmap) return -ENOMEM; snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); return 0; } int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 *id_num) { *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); if (*id_num >= bmap->max_count) return -EINVAL; __set_bit(*id_num, bmap->bitmap); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n", bmap->name, *id_num); return 0; } void qed_bmap_set_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num) { if (id_num >= bmap->max_count) return; __set_bit(id_num, bmap->bitmap); } void qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num) { bool b_acquired; if (id_num >= bmap->max_count) return; b_acquired = test_and_clear_bit(id_num, bmap->bitmap); if (!b_acquired) { DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n", bmap->name, id_num); return; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n", bmap->name, id_num); } int qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num) { if (id_num >= bmap->max_count) return -1; return test_bit(id_num, bmap->bitmap); } static bool qed_bmap_is_empty(struct qed_bmap *bmap) { return bitmap_empty(bmap->bitmap, bmap->max_count); } static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) { /* First sb id for RoCE is after all the l2 sb */ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; } int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) { struct qed_rdma_info *p_rdma_info; p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); if (!p_rdma_info) return -ENOMEM; spin_lock_init(&p_rdma_info->lock); p_hwfn->p_rdma_info = p_rdma_info; return 0; } void qed_rdma_info_free(struct qed_hwfn *p_hwfn) { kfree(p_hwfn->p_rdma_info); p_hwfn->p_rdma_info = NULL; } static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; u32 num_cons, num_tasks; int rc = -ENOMEM; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); if (QED_IS_IWARP_PERSONALITY(p_hwfn)) p_rdma_info->proto = PROTOCOLID_IWARP; else p_rdma_info->proto = PROTOCOLID_ROCE; num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, NULL); if (QED_IS_IWARP_PERSONALITY(p_hwfn)) p_rdma_info->num_qps = num_cons; else p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */ 
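	/* The allocations below follow the usual qed pattern: the device and
	 * port parameter structs are allocated first, then a chain of
	 * qed_rdma_bmap_alloc() bitmaps (PD, XRCD, DPI, CQ, toggle bits, TID,
	 * CID, real CID, XRC SRQ, SRQ); any failure unwinds the earlier
	 * allocations in reverse order via the goto labels at the end of the
	 * function.
	 */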
num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); /* Each MR uses a single task */ p_rdma_info->num_mrs = num_tasks; /* Queue zone lines are shared between RoCE and L2 in such a way that * they can be used by each without obstructing the other. */ p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE); /* Allocate a struct with device params and fill it */ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); if (!p_rdma_info->dev) return rc; /* Allocate a struct with port params and fill it */ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); if (!p_rdma_info->port) goto free_rdma_dev; /* Allocate bit map for pd's */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS, "PD"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate pd_map, rc = %d\n", rc); goto free_rdma_port; } /* Allocate bit map for XRC Domains */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map, QED_RDMA_MAX_XRCDS, "XRCD"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate xrcd_map,rc = %d\n", rc); goto free_pd_map; } /* Allocate DPI bitmap */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, p_hwfn->dpi_count, "DPI"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate DPI bitmap, rc = %d\n", rc); goto free_xrcd_map; } /* Allocate bitmap for cq's. The maximum number of CQs is bound to * the number of connections we support. (num_qps in iWARP or * num_qps/2 in RoCE). */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate cq bitmap, rc = %d\n", rc); goto free_dpi_map; } /* Allocate bitmap for toggle bit for cq icids * We toggle the bit every time we create or resize cq for a given icid. * Size needs to equal the size of the cq bmap. */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, num_cons, "Toggle"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate toggle bits, rc = %d\n", rc); goto free_cq_map; } /* Allocate bitmap for itids */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, p_rdma_info->num_mrs, "MR"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate itids bitmaps, rc = %d\n", rc); goto free_toggle_map; } /* Allocate bitmap for cids used for qps. */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons, "CID"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate cid bitmap, rc = %d\n", rc); goto free_tid_map; } /* Allocate bitmap for cids used for responders/requesters. */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons, "REAL_CID"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate real cid bitmap, rc = %d\n", rc); goto free_cid_map; } /* The first SRQ follows the last XRC SRQ. This means that the * SRQ IDs start from an offset equals to max_xrc_srqs. 
*/ p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count; rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrc_srq_map, p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate xrc srq bitmap, rc = %d\n", rc); goto free_real_cid_map; } /* Allocate bitmap for srqs */ p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count; rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, p_rdma_info->num_srqs, "SRQ"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate srq bitmap, rc = %d\n", rc); goto free_xrc_srq_map; } if (QED_IS_IWARP_PERSONALITY(p_hwfn)) rc = qed_iwarp_alloc(p_hwfn); if (rc) goto free_srq_map; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); return 0; free_srq_map: kfree(p_rdma_info->srq_map.bitmap); free_xrc_srq_map: kfree(p_rdma_info->xrc_srq_map.bitmap); free_real_cid_map: kfree(p_rdma_info->real_cid_map.bitmap); free_cid_map: kfree(p_rdma_info->cid_map.bitmap); free_tid_map: kfree(p_rdma_info->tid_map.bitmap); free_toggle_map: kfree(p_rdma_info->toggle_bits.bitmap); free_cq_map: kfree(p_rdma_info->cq_map.bitmap); free_dpi_map: kfree(p_rdma_info->dpi_map.bitmap); free_xrcd_map: kfree(p_rdma_info->xrcd_map.bitmap); free_pd_map: kfree(p_rdma_info->pd_map.bitmap); free_rdma_port: kfree(p_rdma_info->port); free_rdma_dev: kfree(p_rdma_info->dev); return rc; } void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, bool check) { unsigned int bit, weight, nbits; unsigned long *b; if (!check) goto end; weight = bitmap_weight(bmap->bitmap, bmap->max_count); if (!weight) goto end; DP_NOTICE(p_hwfn, "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n", bmap->name, bmap->max_count, weight); for (bit = 0; bit < bmap->max_count; bit += 512) { b = bmap->bitmap + BITS_TO_LONGS(bit); nbits = min(bmap->max_count - bit, 512U); if (!bitmap_empty(b, nbits)) DP_NOTICE(p_hwfn, "line 0x%04x: %*pb\n", bit / 512, nbits, b); } end: bitmap_free(bmap->bitmap); bmap->bitmap = NULL; } static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; if (QED_IS_IWARP_PERSONALITY(p_hwfn)) qed_iwarp_resc_free(p_hwfn); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1); kfree(p_rdma_info->port); kfree(p_rdma_info->dev); } static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn) { qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey); } static void qed_rdma_free(struct qed_hwfn *p_hwfn) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); qed_rdma_free_reserved_lkey(p_hwfn); qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto); 
qed_rdma_resc_free(p_hwfn); } static void qed_rdma_init_events(struct qed_hwfn *p_hwfn, struct qed_rdma_start_in_params *params) { struct qed_rdma_events *events; events = &p_hwfn->p_rdma_info->events; events->unaffiliated_event = params->events->unaffiliated_event; events->affiliated_event = params->events->affiliated_event; events->context = params->events->context; } static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, struct qed_rdma_start_in_params *params) { struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; struct qed_dev *cdev = p_hwfn->cdev; u32 pci_status_control; u32 num_qps; /* Vendor specific information */ dev->vendor_id = cdev->vendor_id; dev->vendor_part_id = cdev->device_id; dev->hw_ver = cdev->chip_rev; dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); addrconf_addr_eui48((u8 *)&dev->sys_image_guid, p_hwfn->hw_info.hw_mac_addr); dev->node_guid = dev->sys_image_guid; dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE, RDMA_MAX_SGE_PER_RQ_WQE); if (cdev->rdma_max_sge) dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE; if (p_hwfn->cdev->rdma_max_srq_sge) { dev->max_srq_sge = min_t(u32, p_hwfn->cdev->rdma_max_srq_sge, dev->max_srq_sge); } dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; dev->max_inline = (cdev->rdma_max_inline) ? min_t(u32, cdev->rdma_max_inline, dev->max_inline) : dev->max_inline; dev->max_wqe = QED_RDMA_MAX_WQE; dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ); /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because * it is up-aligned to 16 and then to ILT page size within qed cxt. * This is OK in terms of ILT but we don't want to configure the FW * above its abilities */ num_qps = ROCE_MAX_QPS; num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps); dev->max_qp = num_qps; /* CQs uses the same icids that QPs use hence they are limited by the * number of icids. There are two icids per QP. */ dev->max_cq = num_qps * 2; /* The number of mrs is smaller by 1 since the first is reserved */ dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1; dev->max_mr_size = QED_RDMA_MAX_MR_SIZE; /* The maximum CQE capacity per CQ supported. 
* max number of cqes will be in two layer pbl, * 8 is the pointer size in bytes * 32 is the size of cq element in bytes */ if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS) dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT; else dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT; dev->max_mw = 0; dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8); dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; if (QED_IS_ROCE_PERSONALITY(p_hwfn)) dev->max_pkey = QED_RDMA_MAX_P_KEY; dev->max_srq = p_hwfn->p_rdma_info->num_srqs; dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM; dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / RDMA_REQ_RD_ATOMIC_ELM_SIZE; dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc * p_hwfn->p_rdma_info->num_qps; dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS; dev->dev_ack_delay = QED_RDMA_ACK_DELAY; dev->max_pd = RDMA_MAX_PDS; dev->max_ah = p_hwfn->p_rdma_info->num_qps; dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE); /* Set capablities */ dev->dev_caps = 0; SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1); SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1); SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1); SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1); SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1); SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1); SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1); SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); /* Check atomic operations support in PCI configuration space. */ pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2, &pci_status_control); if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); if (QED_IS_IWARP_PERSONALITY(p_hwfn)) qed_iwarp_init_devinfo(p_hwfn); } static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) { struct qed_rdma_port *port = p_hwfn->p_rdma_info->port; struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; port->port_state = p_hwfn->mcp_info->link_output.link_up ? 
QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; port->max_msg_size = min_t(u64, (dev->max_mr_mw_fmr_size * p_hwfn->cdev->rdma_max_sge), BIT(31)); port->pkey_bad_counter = 0; } static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { int rc = 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n"); p_hwfn->b_rdma_enabled_in_prs = false; if (QED_IS_IWARP_PERSONALITY(p_hwfn)) qed_iwarp_init_hw(p_hwfn, p_ptt); else rc = qed_roce_init_hw(p_hwfn, p_ptt); return rc; } static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_start_in_params *params, struct qed_ptt *p_ptt) { struct rdma_init_func_ramrod_data *p_ramrod; struct qed_rdma_cnq_params *p_cnq_pbl_list; struct rdma_init_func_hdr *p_params_header; struct rdma_cnq_params *p_cnq_params; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u32 cnq_id, sb_id; u16 igu_sb_id; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); /* Save the number of cnqs for the function close ramrod */ p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT, p_hwfn->p_rdma_info->proto, &init_data); if (rc) return rc; if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { qed_iwarp_init_fw_ramrod(p_hwfn, &p_ent->ramrod.iwarp_init_func); p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; } else { p_ramrod = &p_ent->ramrod.roce_init_func.rdma; } p_params_header = &p_ramrod->params_header; p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); p_params_header->num_cnqs = params->desired_cnq; p_params_header->first_reg_srq_id = cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset); p_params_header->reg_srq_base_addr = cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM)); if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS) p_params_header->cq_ring_mode = 1; else p_params_header->cq_ring_mode = 0; for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id); p_cnq_params = &p_ramrod->cnq_params[cnq_id]; p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id]; p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages; DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr, p_cnq_pbl_list->pbl_ptr); /* we assume here that cnq_id and qz_offset are the same */ p_cnq_params->queue_zone_num = cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base + cnq_id); } return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); spin_lock_bh(&p_hwfn->p_rdma_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); if (rc) goto out; rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); out: DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); return rc; } static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) { struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; /* Tid 0 will be used as the key for "reserved MR". * The driver should allocate memory for it so it can be loaded but no * ramrod should be passed on it. 
*/ qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey); if (dev->reserved_lkey != RDMA_RESERVED_LKEY) { DP_NOTICE(p_hwfn, "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n"); return -EINVAL; } return 0; } static int qed_rdma_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_rdma_start_in_params *params) { int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); qed_rdma_init_devinfo(p_hwfn, params); qed_rdma_init_port(p_hwfn); qed_rdma_init_events(p_hwfn, params); rc = qed_rdma_reserve_lkey(p_hwfn); if (rc) return rc; rc = qed_rdma_init_hw(p_hwfn, p_ptt); if (rc) return rc; if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { rc = qed_iwarp_setup(p_hwfn, params); if (rc) return rc; } else { rc = qed_roce_setup(p_hwfn); if (rc) return rc; } return qed_rdma_start_fw(p_hwfn, params, p_ptt); } static int qed_rdma_stop(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_close_func_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; struct qed_ptt *p_ptt; u32 ll2_ethertype_en; int rc = -EBUSY; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n"); p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n"); return rc; } /* Disable RoCE search */ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); p_hwfn->b_rdma_enabled_in_prs = false; p_hwfn->p_rdma_info->active = 0; qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, (ll2_ethertype_en & 0xFFFE)); if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { rc = qed_iwarp_stop(p_hwfn); if (rc) { qed_ptt_release(p_hwfn, p_ptt); return rc; } } else { qed_roce_stop(p_hwfn); } qed_ptt_release(p_hwfn, p_ptt); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; /* Stop RoCE */ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE, p_hwfn->p_rdma_info->proto, &init_data); if (rc) goto out; p_ramrod = &p_ent->ramrod.rdma_close_func; p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs; p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); rc = qed_spq_post(p_hwfn, p_ent, NULL); out: qed_rdma_free(p_hwfn); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc); return rc; } static int qed_rdma_add_user(void *rdma_cxt, struct qed_rdma_add_user_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; u32 dpi_start_offset; u32 returned_id = 0; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n"); /* Allocate DPI */ spin_lock_bh(&p_hwfn->p_rdma_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, &returned_id); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); out_params->dpi = (u16)returned_id; /* Calculate the corresponding DPI address */ dpi_start_offset = p_hwfn->dpi_start_offset; out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset + out_params->dpi * p_hwfn->dpi_size; out_params->dpi_phys_addr = p_hwfn->db_phys_addr + dpi_start_offset + ((out_params->dpi) * p_hwfn->dpi_size); out_params->dpi_size = p_hwfn->dpi_size; out_params->wid_count = p_hwfn->wid_count; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); return rc; } static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; struct 
qed_mcp_link_state *p_link_output; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n"); /* The link state is saved only for the leading hwfn */ p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output; p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; p_port->link_speed = p_link_output->speed; p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE; return p_port; } static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n"); /* Return struct with device parameters */ return p_hwfn->p_rdma_info->dev; } static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) { struct qed_hwfn *p_hwfn; u16 qz_num; u32 addr; p_hwfn = (struct qed_hwfn *)rdma_cxt; if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) { DP_NOTICE(p_hwfn, "queue zone offset %d is too large (max is %d)\n", qz_offset, p_hwfn->p_rdma_info->max_queue_zones); return; } qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, USTORM_COMMON_QUEUE_CONS, qz_num); REG_WR16(p_hwfn, addr, prod); /* keep prod updates ordered */ wmb(); } static int qed_fill_rdma_dev_info(struct qed_dev *cdev, struct qed_dev_rdma_info *info) { struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); memset(info, 0, sizeof(*info)); info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ? QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP; info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0); qed_fill_dev_info(cdev, &info->common); return 0; } static int qed_rdma_get_sb_start(struct qed_dev *cdev) { int feat_num; if (cdev->num_hwfns > 1) feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE); else feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) * cdev->num_hwfns; return feat_num; } static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev) { int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ); int n_msix = cdev->int_params.rdma_msix_cnt; return min_t(int, n_cnq, n_msix); } static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt) { int limit = 0; /* Mark the fastpath as free/used */ cdev->int_params.fp_initialized = cnt ? 
true : false; if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) { DP_ERR(cdev, "qed roce supports only MSI-X interrupts (detected %d).\n", cdev->int_params.out.int_mode); return -EINVAL; } else if (cdev->int_params.fp_msix_cnt) { limit = cdev->int_params.rdma_msix_cnt; } if (!limit) return -ENOMEM; return min_t(int, cnt, limit); } static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info) { memset(info, 0, sizeof(*info)); if (!cdev->int_params.fp_initialized) { DP_INFO(cdev, "Protocol driver requested interrupt information, but its support is not yet configured\n"); return -EINVAL; } if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { int msix_base = cdev->int_params.rdma_msix_base; info->msix_cnt = cdev->int_params.rdma_msix_cnt; info->msix = &cdev->int_params.msix_table[msix_base]; DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n", info->msix_cnt, msix_base); } return 0; } static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; u32 returned_id; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n"); /* Allocates an unused protection domain */ spin_lock_bh(&p_hwfn->p_rdma_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, &returned_id); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); *pd = (u16)returned_id; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc); return rc; } static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd); /* Returns a previously allocated protection domain for reuse */ spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; u32 returned_id; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n"); spin_lock_bh(&p_hwfn->p_rdma_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, &returned_id); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); if (rc) { DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n"); return rc; } *xrcd_id = (u16)returned_id; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc); return rc; } static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id); spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } static enum qed_rdma_toggle_bit qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid) { struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; enum qed_rdma_toggle_bit toggle_bit; u32 bmap_id; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid); /* the function toggle the bit that is related to a given icid * and returns the new toggle bit's value */ bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto); spin_lock_bh(&p_info->lock); toggle_bit = !test_and_change_bit(bmap_id, p_info->toggle_bits.bitmap); spin_unlock_bh(&p_info->lock); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n", toggle_bit); return toggle_bit; } static int qed_rdma_create_cq(void *rdma_cxt, struct qed_rdma_create_cq_in_params *params, u16 *icid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_rdma_info *p_info 
= p_hwfn->p_rdma_info; struct rdma_create_cq_ramrod_data *p_ramrod; enum qed_rdma_toggle_bit toggle_bit; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u32 returned_id, start_cid; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n", params->cq_handle_hi, params->cq_handle_lo); /* Allocate icid */ spin_lock_bh(&p_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id); spin_unlock_bh(&p_info->lock); if (rc) { DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc); return rc; } start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto); *icid = returned_id + start_cid; /* Check if icid requires a page allocation */ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid); if (rc) goto err; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = *icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; /* Send create CQ ramrod */ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_CREATE_CQ, p_info->proto, &init_data); if (rc) goto err; p_ramrod = &p_ent->ramrod.rdma_create_cq; p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi); p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo); p_ramrod->dpi = cpu_to_le16(params->dpi); p_ramrod->is_two_level_pbl = params->pbl_two_level; p_ramrod->max_cqes = cpu_to_le32(params->cq_size); DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr); p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages); p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) + params->cnq_id; p_ramrod->int_timeout = cpu_to_le16(params->int_timeout); /* toggle the bit for every resize or create cq for a given icid */ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); p_ramrod->toggle_bit = toggle_bit; rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) { /* restore toggle bit */ qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); goto err; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc); return rc; err: /* release allocated icid */ spin_lock_bh(&p_info->lock); qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id); spin_unlock_bh(&p_info->lock); DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc); return rc; } static int qed_rdma_destroy_cq(void *rdma_cxt, struct qed_rdma_destroy_cq_in_params *in_params, struct qed_rdma_destroy_cq_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_destroy_cq_output_params *p_ramrod_res; struct rdma_destroy_cq_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; dma_addr_t ramrod_res_phys; enum protocol_type proto; int rc = -ENOMEM; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid); p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct rdma_destroy_cq_output_params), &ramrod_res_phys, GFP_KERNEL); if (!p_ramrod_res) { DP_NOTICE(p_hwfn, "qed destroy cq failed: cannot allocate memory (ramrod)\n"); return rc; } /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = in_params->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; proto = p_hwfn->p_rdma_info->proto; /* Send destroy CQ ramrod */ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DESTROY_CQ, proto, &init_data); if (rc) goto err; p_ramrod = &p_ent->ramrod.rdma_destroy_cq; DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err; out_params->num_cq_notif = 
le16_to_cpu(p_ramrod_res->cnq_num); dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct rdma_destroy_cq_output_params), p_ramrod_res, ramrod_res_phys); /* Free icid */ spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cq_map, (in_params->icid - qed_cxt_get_proto_cid_start(p_hwfn, proto))); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc); return rc; err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct rdma_destroy_cq_output_params), p_ramrod_res, ramrod_res_phys); return rc; } void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac) { p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]); p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]); p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]); } static int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; int rc = 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); /* The following fields are filled in from qp and not FW as they can't * be modified by FW */ out_params->mtu = qp->mtu; out_params->dest_qp = qp->dest_qp; out_params->incoming_atomic_en = qp->incoming_atomic_en; out_params->e2e_flow_control_en = qp->e2e_flow_control_en; out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en; out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en; out_params->dgid = qp->dgid; out_params->flow_label = qp->flow_label; out_params->hop_limit_ttl = qp->hop_limit_ttl; out_params->traffic_class_tos = qp->traffic_class_tos; out_params->timeout = qp->ack_timeout; out_params->rnr_retry = qp->rnr_retry_cnt; out_params->retry_cnt = qp->retry_cnt; out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer; out_params->pkey_index = 0; out_params->max_rd_atomic = qp->max_rd_atomic_req; out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp; out_params->sqd_async = qp->sqd_async; if (QED_IS_IWARP_PERSONALITY(p_hwfn)) qed_iwarp_query_qp(qp, out_params); else rc = qed_roce_query_qp(p_hwfn, qp, out_params); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc); return rc; } static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; int rc = 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); if (QED_IS_IWARP_PERSONALITY(p_hwfn)) rc = qed_iwarp_destroy_qp(p_hwfn, qp); else rc = qed_roce_destroy_qp(p_hwfn, qp); /* free qp params struct */ kfree(qp); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n"); return rc; } static struct qed_rdma_qp * qed_rdma_create_qp(void *rdma_cxt, struct qed_rdma_create_qp_in_params *in_params, struct qed_rdma_create_qp_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_rdma_qp *qp; u8 max_stats_queues; int rc; if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info->active) { pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", rdma_cxt, in_params, out_params); return NULL; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qed rdma create qp called with qp_handle = %08x%08x\n", in_params->qp_handle_hi, in_params->qp_handle_lo); /* Some sanity checks... */ max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues; if (in_params->stats_queue >= max_stats_queues) { DP_ERR(p_hwfn->cdev, "qed rdma create qp failed due to invalid statistics queue %d. 
maximum is %d\n", in_params->stats_queue, max_stats_queues); return NULL; } if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { if (in_params->sq_num_pages * sizeof(struct regpair) > IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) { DP_NOTICE(p_hwfn->cdev, "Sq num pages: %d exceeds maximum\n", in_params->sq_num_pages); return NULL; } if (in_params->rq_num_pages * sizeof(struct regpair) > IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) { DP_NOTICE(p_hwfn->cdev, "Rq num pages: %d exceeds maximum\n", in_params->rq_num_pages); return NULL; } } qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return NULL; qp->cur_state = QED_ROCE_QP_STATE_RESET; qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo); qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi); qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo); qp->use_srq = in_params->use_srq; qp->signal_all = in_params->signal_all; qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey; qp->pd = in_params->pd; qp->dpi = in_params->dpi; qp->sq_cq_id = in_params->sq_cq_id; qp->sq_num_pages = in_params->sq_num_pages; qp->sq_pbl_ptr = in_params->sq_pbl_ptr; qp->rq_cq_id = in_params->rq_cq_id; qp->rq_num_pages = in_params->rq_num_pages; qp->rq_pbl_ptr = in_params->rq_pbl_ptr; qp->srq_id = in_params->srq_id; qp->req_offloaded = false; qp->resp_offloaded = false; qp->e2e_flow_control_en = qp->use_srq ? false : true; qp->stats_queue = in_params->stats_queue; qp->qp_type = in_params->qp_type; qp->xrcd_id = in_params->xrcd_id; if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { rc = qed_iwarp_create_qp(p_hwfn, qp, out_params); qp->qpid = qp->icid; } else { qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE); rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); qp->qpid = ((0xFF << 16) | qp->icid); } if (rc) { kfree(qp); return NULL; } out_params->icid = qp->icid; out_params->qp_id = qp->qpid; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc); return qp; } static int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp, struct qed_rdma_modify_qp_in_params *params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; enum qed_roce_qp_state prev_state; int rc = 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n", qp->icid, params->new_state); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); return rc; } if (GET_FIELD(params->modify_flags, QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) { qp->incoming_rdma_read_en = params->incoming_rdma_read_en; qp->incoming_rdma_write_en = params->incoming_rdma_write_en; qp->incoming_atomic_en = params->incoming_atomic_en; } /* Update QP structure with the updated values */ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE)) qp->roce_mode = params->roce_mode; if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)) qp->pkey = params->pkey; if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) qp->e2e_flow_control_en = params->e2e_flow_control_en; if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP)) qp->dest_qp = params->dest_qp; if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) { /* Indicates that the following parameters have changed: * Traffic class, flow label, hop limit, source GID, * destination GID, loopback indicator */ qp->traffic_class_tos = params->traffic_class_tos; qp->flow_label = params->flow_label; qp->hop_limit_ttl = params->hop_limit_ttl; qp->sgid = params->sgid; qp->dgid = params->dgid; qp->udp_src_port 
= 0; qp->vlan_id = params->vlan_id; qp->mtu = params->mtu; qp->lb_indication = params->lb_indication; memcpy((u8 *)&qp->remote_mac_addr[0], (u8 *)&params->remote_mac_addr[0], ETH_ALEN); if (params->use_local_mac) { memcpy((u8 *)&qp->local_mac_addr[0], (u8 *)&params->local_mac_addr[0], ETH_ALEN); } else { memcpy((u8 *)&qp->local_mac_addr[0], (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); } } if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN)) qp->rq_psn = params->rq_psn; if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN)) qp->sq_psn = params->sq_psn; if (GET_FIELD(params->modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)) qp->max_rd_atomic_req = params->max_rd_atomic_req; if (GET_FIELD(params->modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)) qp->max_rd_atomic_resp = params->max_rd_atomic_resp; if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)) qp->ack_timeout = params->ack_timeout; if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)) qp->retry_cnt = params->retry_cnt; if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)) qp->rnr_retry_cnt = params->rnr_retry_cnt; if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)) qp->min_rnr_nak_timer = params->min_rnr_nak_timer; qp->sqd_async = params->sqd_async; prev_state = qp->cur_state; if (GET_FIELD(params->modify_flags, QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) { qp->cur_state = params->new_state; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n", qp->cur_state); } switch (qp->qp_type) { case QED_RDMA_QP_TYPE_XRC_INI: qp->has_req = true; break; case QED_RDMA_QP_TYPE_XRC_TGT: qp->has_resp = true; break; default: qp->has_req = true; qp->has_resp = true; } if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { enum qed_iwarp_qp_state new_state = qed_roce2iwarp_state(qp->cur_state); rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0); } else { rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc); return rc; } static int qed_rdma_register_tid(void *rdma_cxt, struct qed_rdma_register_tid_in_params *params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_register_tid_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; enum rdma_tid_type tid_type; u8 fw_return_code; u16 flags = 0; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR, p_hwfn->p_rdma_info->proto, &init_data); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); return rc; } if (p_hwfn->p_rdma_info->last_tid < params->itid) p_hwfn->p_rdma_info->last_tid = params->itid; SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, params->pbl_two_level); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, false); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); /* Don't initialize D/C field, as it may override other bits. 
*/ if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr)) SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, params->page_size_log - 12); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, params->remote_read); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, params->remote_write); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, params->remote_atomic); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, params->local_write); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, params->mw_bind); p_ramrod = &p_ent->ramrod.rdma_register_tid; p_ramrod->flags = cpu_to_le16(flags); SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, params->pbl_page_size_log - 12); SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr); switch (params->tid_type) { case QED_RDMA_TID_REGISTERED_MR: tid_type = RDMA_TID_REGISTERED_MR; break; case QED_RDMA_TID_FMR: tid_type = RDMA_TID_FMR; break; case QED_RDMA_TID_MW: tid_type = RDMA_TID_MW; break; default: rc = -EINVAL; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); qed_sp_destroy_request(p_hwfn, p_ent); return rc; } SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type); p_ramrod->itid = cpu_to_le32(params->itid); p_ramrod->key = params->key; p_ramrod->pd = cpu_to_le16(params->pd); p_ramrod->length_hi = (u8)(params->length >> 32); p_ramrod->length_lo = DMA_LO_LE(params->length); DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); /* DIF */ if (params->dif_enabled) { SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); DMA_REGPAIR_LE(p_ramrod->dif_error_addr, params->dif_error_addr); } rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); if (rc) return rc; if (fw_return_code != RDMA_RETURN_OK) { DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); return -EINVAL; } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc); return rc; } static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct rdma_deregister_tid_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; struct qed_ptt *p_ptt; u8 fw_return_code; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR, p_hwfn->p_rdma_info->proto, &init_data); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); return rc; } p_ramrod = &p_ent->ramrod.rdma_deregister_tid; p_ramrod->itid = cpu_to_le32(itid); rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); return rc; } if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) { DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); return -EINVAL; } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) { /* Bit indicating that the TID is in use and a nig drain is * required before sending the ramrod again */ p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { rc = -EBUSY; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n"); return rc; } rc = qed_mcp_drain(p_hwfn, p_ptt); if (rc) { qed_ptt_release(p_hwfn, p_ptt); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Drain 
failed\n"); return rc; } qed_ptt_release(p_hwfn, p_ptt); /* Resend the ramrod */ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR, p_hwfn->p_rdma_info->proto, &init_data); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to init sp-element\n"); return rc; } rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Ramrod failed\n"); return rc; } if (fw_return_code != RDMA_RETURN_OK) { DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); return rc; } } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc); return rc; } static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) { return QED_AFFIN_HWFN(cdev); } static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn, bool is_xrc) { if (is_xrc) return &p_hwfn->p_rdma_info->xrc_srq_map; return &p_hwfn->p_rdma_info->srq_map; } static int qed_rdma_modify_srq(void *rdma_cxt, struct qed_rdma_modify_srq_in_params *in_params) { struct rdma_srq_modify_ramrod_data *p_ramrod; struct qed_sp_init_data init_data = {}; struct qed_hwfn *p_hwfn = rdma_cxt; struct qed_spq_entry *p_ent; u16 opaque_fid; int rc; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_MODIFY_SRQ, p_hwfn->p_rdma_info->proto, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.rdma_modify_srq; p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); opaque_fid = p_hwfn->hw_info.opaque_fid; p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) return rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n", in_params->srq_id, in_params->is_xrc); return rc; } static int qed_rdma_destroy_srq(void *rdma_cxt, struct qed_rdma_destroy_srq_in_params *in_params) { struct rdma_srq_destroy_ramrod_data *p_ramrod; struct qed_sp_init_data init_data = {}; struct qed_hwfn *p_hwfn = rdma_cxt; struct qed_spq_entry *p_ent; struct qed_bmap *bmap; u16 opaque_fid; u16 offset; int rc; opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.opaque_fid = opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DESTROY_SRQ, p_hwfn->p_rdma_info->proto, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.rdma_destroy_srq; p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) return rc; bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); offset = (in_params->is_xrc) ? 
0 : p_hwfn->p_rdma_info->srq_id_offset; spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "XRC/SRQ destroyed Id = %x, is_xrc=%u\n", in_params->srq_id, in_params->is_xrc); return rc; } static int qed_rdma_create_srq(void *rdma_cxt, struct qed_rdma_create_srq_in_params *in_params, struct qed_rdma_create_srq_out_params *out_params) { struct rdma_srq_create_ramrod_data *p_ramrod; struct qed_sp_init_data init_data = {}; struct qed_hwfn *p_hwfn = rdma_cxt; enum qed_cxt_elem_type elem_type; struct qed_spq_entry *p_ent; u16 opaque_fid, srq_id; struct qed_bmap *bmap; u32 returned_id; u16 offset; int rc; bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); spin_lock_bh(&p_hwfn->p_rdma_info->lock); rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); if (rc) { DP_NOTICE(p_hwfn, "failed to allocate xrc/srq id (is_xrc=%u)\n", in_params->is_xrc); return rc; } elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ); rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); if (rc) goto err; opaque_fid = p_hwfn->hw_info.opaque_fid; opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.opaque_fid = opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_CREATE_SRQ, p_hwfn->p_rdma_info->proto, &init_data); if (rc) goto err; p_ramrod = &p_ent->ramrod.rdma_create_srq; DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); p_ramrod->page_size = cpu_to_le16(in_params->page_size); DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset; srq_id = (u16)returned_id + offset; p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); if (in_params->is_xrc) { SET_FIELD(p_ramrod->flags, RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1); SET_FIELD(p_ramrod->flags, RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN, in_params->reserved_key_en); p_ramrod->xrc_srq_cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | in_params->cq_cid); p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id); } rc = qed_spq_post(p_hwfn, p_ent, NULL); if (rc) goto err; out_params->srq_id = srq_id; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "XRC/SRQ created Id = %x (is_xrc=%u)\n", out_params->srq_id, in_params->is_xrc); return rc; err: spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, bmap, returned_id); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); return rc; } bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) { bool result; /* if rdma wasn't activated yet, naturally there are no qps */ if (!p_hwfn->p_rdma_info->active) return false; spin_lock_bh(&p_hwfn->p_rdma_info->lock); if (!p_hwfn->p_rdma_info->cid_map.bitmap) result = false; else result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); return result; } void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 val; val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 
0 : 1; qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); } void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { p_hwfn->db_bar_no_edpm = true; qed_rdma_dpm_conf(p_hwfn, p_ptt); } static int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_ptt *p_ptt; int rc = -EBUSY; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "desired_cnq = %08x\n", params->desired_cnq); p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) goto err; rc = qed_rdma_alloc(p_hwfn); if (rc) goto err1; rc = qed_rdma_setup(p_hwfn, p_ptt, params); if (rc) goto err2; qed_ptt_release(p_hwfn, p_ptt); p_hwfn->p_rdma_info->active = 1; return rc; err2: qed_rdma_free(p_hwfn); err1: qed_ptt_release(p_hwfn, p_ptt); err: DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); return rc; } static int qed_rdma_init(struct qed_dev *cdev, struct qed_rdma_start_in_params *params) { return qed_rdma_start(QED_AFFIN_HWFN(cdev), params); } static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); spin_lock_bh(&p_hwfn->p_rdma_info->lock); qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, u8 *old_mac_address, const u8 *new_mac_address) { int rc = 0; if (old_mac_address) qed_llh_remove_mac_filter(cdev, 0, old_mac_address); if (new_mac_address) rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address); if (rc) DP_ERR(cdev, "qed roce ll2 mac filter set: failed to add MAC filter\n"); return rc; } static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset) { enum qed_eng eng; u8 ppfid = 0; int rc; /* Make sure iwarp cmt mode is enabled before setting affinity */ if (!cdev->iwarp_cmt) return -EINVAL; if (b_reset) eng = QED_BOTH_ENG; else eng = cdev->l2_affin_hint ? 
QED_ENG1 : QED_ENG0; rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng); if (rc) { DP_NOTICE(cdev, "Failed to set the engine affinity of ppfid %d\n", ppfid); return rc; } DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP), "LLH: Set the engine affinity of non-RoCE packets as %d\n", eng); return 0; } static const struct qed_rdma_ops qed_rdma_ops_pass = { .common = &qed_common_ops_pass, .fill_dev_info = &qed_fill_rdma_dev_info, .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, .rdma_init = &qed_rdma_init, .rdma_add_user = &qed_rdma_add_user, .rdma_remove_user = &qed_rdma_remove_user, .rdma_stop = &qed_rdma_stop, .rdma_query_port = &qed_rdma_query_port, .rdma_query_device = &qed_rdma_query_device, .rdma_get_start_sb = &qed_rdma_get_sb_start, .rdma_get_rdma_int = &qed_rdma_get_int, .rdma_set_rdma_int = &qed_rdma_set_int, .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, .rdma_alloc_pd = &qed_rdma_alloc_pd, .rdma_dealloc_pd = &qed_rdma_free_pd, .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd, .rdma_dealloc_xrcd = &qed_rdma_free_xrcd, .rdma_create_cq = &qed_rdma_create_cq, .rdma_destroy_cq = &qed_rdma_destroy_cq, .rdma_create_qp = &qed_rdma_create_qp, .rdma_modify_qp = &qed_rdma_modify_qp, .rdma_query_qp = &qed_rdma_query_qp, .rdma_destroy_qp = &qed_rdma_destroy_qp, .rdma_alloc_tid = &qed_rdma_alloc_tid, .rdma_free_tid = &qed_rdma_free_tid, .rdma_register_tid = &qed_rdma_register_tid, .rdma_deregister_tid = &qed_rdma_deregister_tid, .rdma_create_srq = &qed_rdma_create_srq, .rdma_modify_srq = &qed_rdma_modify_srq, .rdma_destroy_srq = &qed_rdma_destroy_srq, .ll2_acquire_connection = &qed_ll2_acquire_connection, .ll2_establish_connection = &qed_ll2_establish_connection, .ll2_terminate_connection = &qed_ll2_terminate_connection, .ll2_release_connection = &qed_ll2_release_connection, .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer, .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet, .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, .ll2_get_stats = &qed_ll2_get_stats, .iwarp_set_engine_affin = &qed_iwarp_set_engine_affin, .iwarp_connect = &qed_iwarp_connect, .iwarp_create_listen = &qed_iwarp_create_listen, .iwarp_destroy_listen = &qed_iwarp_destroy_listen, .iwarp_accept = &qed_iwarp_accept, .iwarp_reject = &qed_iwarp_reject, .iwarp_send_rtr = &qed_iwarp_send_rtr, }; const struct qed_rdma_ops *qed_get_rdma_ops(void) { return &qed_rdma_ops_pass; } EXPORT_SYMBOL(qed_get_rdma_ops);
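/* Illustrative sketch only, not part of the driver: one way an upper-layer
 * RDMA driver could consume the qed_rdma_ops table exported above. The
 * function name example_rdma_alloc_pd and the assumption that the caller
 * already holds a valid qed_dev pointer are hypothetical; the ops members
 * and their signatures are taken from qed_rdma_ops_pass above.
 */
static int example_rdma_alloc_pd(struct qed_dev *cdev, u16 *pd)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	void *rdma_cxt = ops->rdma_get_rdma_ctx(cdev);

	/* Returns an unused protection domain id from the per-hwfn PD bitmap */
	return ops->rdma_alloc_pd(rdma_cxt, pd);
}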
linux-master
drivers/net/ethernet/qlogic/qed/qed_rdma.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* Copyright 2021 Marvell. All rights reserved. */ #include <linux/types.h> #include <asm/byteorder.h> #include <asm/param.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/qed/qed_nvmetcp_if.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_nvmetcp.h" #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_sp.h" #include "qed_reg_addr.h" #include "qed_nvmetcp_fw_funcs.h" static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, u16 echo, union event_ring_data *data, u8 fw_return_code) { if (p_hwfn->p_nvmetcp_info->event_cb) { struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info; return p_nvmetcp->event_cb(p_nvmetcp->event_context, fw_event_code, data); } else { DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n"); return -EINVAL; } } static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr, void *event_context, nvmetcp_event_cb_t async_event_cb) { struct nvmetcp_init_ramrod_params *p_ramrod = NULL; struct qed_nvmetcp_pf_params *p_params = NULL; struct scsi_init_func_queues *p_queue = NULL; struct nvmetcp_spe_func_init *p_init = NULL; struct qed_sp_init_data init_data = {}; struct qed_spq_entry *p_ent = NULL; int rc = 0; u16 val; u8 i; /* Get SPQ entry */ init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, NVMETCP_RAMROD_CMD_ID_INIT_FUNC, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.nvmetcp_init; p_init = &p_ramrod->nvmetcp_init_spe; p_params = &p_hwfn->pf_params.nvmetcp_pf_params; p_queue = &p_init->q_params; p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) + p_params->ll2_ooo_queue_id; SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1); p_init->func_params.log_page_size = ilog2(PAGE_SIZE); p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks); p_init->debug_flags = p_params->debug_mode; DMA_REGPAIR_LE(p_queue->glbl_q_params_addr, p_params->glbl_q_params_addr); p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE); p_queue->num_queues = p_params->num_queues; val = RESC_START(p_hwfn, QED_CMDQS_CQS); p_queue->queue_relative_offset = cpu_to_le16((u16)val); p_queue->cq_sb_pi = p_params->gl_rq_pi; for (i = 0; i < p_params->num_queues; i++) { val = qed_get_igu_sb_id(p_hwfn, i); p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); } SET_FIELD(p_queue->q_validity, SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0); p_queue->cmdq_num_entries = 0; p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ); p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER); p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER); p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT); p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT; 
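/* Note: the SYN retransmit limit programmed just below reuses the
 * QED_TCP_MAX_FIN_RT constant that was written to max_fin_rt above.
 */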
SET_FIELD(p_ramrod->nvmetcp_init_spe.params, NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT); p_hwfn->p_nvmetcp_info->event_context = event_context; p_hwfn->p_nvmetcp_info->event_cb = async_event_cb; qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP, qed_nvmetcp_async_event); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; rc = qed_spq_post(p_hwfn, p_ent, NULL); qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP); return rc; } static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev, struct qed_dev_nvmetcp_info *info) { struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev); int rc; memset(info, 0, sizeof(*info)); rc = qed_fill_dev_info(cdev, &info->common); info->port_id = MFW_PORT(hwfn); info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ); return rc; } static void qed_register_nvmetcp_ops(struct qed_dev *cdev, struct qed_nvmetcp_cb_ops *ops, void *cookie) { cdev->protocol_ops.nvmetcp = ops; cdev->ops_cookie = cookie; } static int qed_nvmetcp_stop(struct qed_dev *cdev) { int rc; if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { DP_NOTICE(cdev, "nvmetcp already stopped\n"); return 0; } if (!hash_empty(cdev->connections)) { DP_NOTICE(cdev, "Can't stop nvmetcp - not all connections were returned\n"); return -EINVAL; } /* Stop the nvmetcp */ rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK, NULL); cdev->flags &= ~QED_FLAG_STORAGE_STARTED; return rc; } static int qed_nvmetcp_start(struct qed_dev *cdev, struct qed_nvmetcp_tid *tasks, void *event_context, nvmetcp_event_cb_t async_event_cb) { struct qed_tid_mem *tid_info; int rc; if (cdev->flags & QED_FLAG_STORAGE_STARTED) { DP_NOTICE(cdev, "nvmetcp already started;\n"); return 0; } rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK, NULL, event_context, async_event_cb); if (rc) { DP_NOTICE(cdev, "Failed to start nvmetcp\n"); return rc; } cdev->flags |= QED_FLAG_STORAGE_STARTED; hash_init(cdev->connections); if (!tasks) return 0; tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL); if (!tid_info) { qed_nvmetcp_stop(cdev); return -ENOMEM; } rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info); if (rc) { DP_NOTICE(cdev, "Failed to gather task information\n"); qed_nvmetcp_stop(cdev); kfree(tid_info); return rc; } /* Fill task information */ tasks->size = tid_info->tid_size; tasks->num_tids_per_block = tid_info->num_tids_per_block; memcpy(tasks->blocks, tid_info->blocks, MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *)); kfree(tid_info); return 0; } static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev, u32 handle) { struct qed_hash_nvmetcp_con *hash_con = NULL; if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) return NULL; hash_for_each_possible(cdev->connections, hash_con, node, handle) { if (hash_con->con->icid == handle) break; } if (!hash_con || hash_con->con->icid != handle) return NULL; return hash_con; } static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn, struct qed_nvmetcp_conn *p_conn, enum spq_mode comp_mode, struct 
qed_spq_comp_cb *p_comp_addr) { struct nvmetcp_spe_conn_offload *p_ramrod = NULL; struct tcp_offload_params_opt2 *p_tcp = NULL; struct qed_sp_init_data init_data = { 0 }; struct qed_spq_entry *p_ent = NULL; dma_addr_t r2tq_pbl_addr; dma_addr_t xhq_pbl_addr; dma_addr_t uhq_pbl_addr; u16 physical_q; int rc = 0; u8 i; /* Get SPQ entry */ init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload; /* Transmission PQ is the first of the PF */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); p_conn->physical_q0 = cpu_to_le16(physical_q); p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q); /* nvmetcp Pure-ACK PQ */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); p_conn->physical_q1 = cpu_to_le16(physical_q); p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q); p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr); r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq); DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr); xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq); DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr); uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq); DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr); p_ramrod->nvmetcp.flags = p_conn->offl_flags; p_ramrod->nvmetcp.default_cq = p_conn->default_cq; p_ramrod->nvmetcp.initial_ack = 0; DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr, p_conn->nvmetcp_cccid_itid_table_addr); p_ramrod->nvmetcp.nvmetcp.cccid_max_range = cpu_to_le16(p_conn->nvmetcp_cccid_max_range); p_tcp = &p_ramrod->tcp; qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi, &p_tcp->remote_mac_addr_mid, &p_tcp->remote_mac_addr_lo, p_conn->remote_mac); qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi, &p_tcp->local_mac_addr_mid, &p_tcp->local_mac_addr_lo, p_conn->local_mac); p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); p_tcp->flags = cpu_to_le16(p_conn->tcp_flags); p_tcp->ip_version = p_conn->ip_version; if (p_tcp->ip_version == TCP_IPV6) { for (i = 0; i < 4; i++) { p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]); p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]); } } else { p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]); p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]); } p_tcp->flow_label = cpu_to_le32(p_conn->flow_label); p_tcp->ttl = p_conn->ttl; p_tcp->tos_or_tc = p_conn->tos_or_tc; p_tcp->remote_port = cpu_to_le16(p_conn->remote_port); p_tcp->local_port = cpu_to_le16(p_conn->local_port); p_tcp->mss = cpu_to_le16(p_conn->mss); p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale; p_tcp->connect_mode = p_conn->connect_mode; p_tcp->cwnd = cpu_to_le32(p_conn->cwnd); p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt; p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout); p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time); p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn, struct qed_nvmetcp_conn *p_conn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; u32 dval; /* Get SPQ entry */ 
memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, NVMETCP_RAMROD_CMD_ID_UPDATE_CONN, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.nvmetcp_conn_update; p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); p_ramrod->flags = p_conn->update_flag; p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size); dval = p_conn->max_recv_pdu_length; p_ramrod->max_recv_pdu_length = cpu_to_le32(dval); dval = p_conn->max_send_pdu_length; p_ramrod->max_send_pdu_length = cpu_to_le32(dval); p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn, struct qed_nvmetcp_conn *p_conn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct nvmetcp_spe_conn_termination *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate; p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); p_ramrod->abortive = p_conn->abortive_dsconnect; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn, struct qed_nvmetcp_conn *p_conn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, NVMETCP_RAMROD_CMD_ID_CLEAR_SQ, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; return qed_spq_post(p_hwfn, p_ent, NULL); } static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) { return (u8 __iomem *)p_hwfn->doorbells + qed_db_addr(cid, DQ_DEMS_LEGACY); } static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn, struct qed_nvmetcp_conn **p_out_conn) { struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, .cnt_type = QED_CHAIN_CNT_TYPE_U16, }; struct qed_nvmetcp_pf_params *p_params = NULL; struct qed_nvmetcp_conn *p_conn = NULL; int rc = 0; /* Try finding a free connection that can be used */ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock); if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list, struct qed_nvmetcp_conn, list_entry); if (p_conn) { list_del(&p_conn->list_entry); spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); *p_out_conn = p_conn; return 0; } spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); /* Need to allocate a new connection */ p_params = &p_hwfn->pf_params.nvmetcp_pf_params; p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); if (!p_conn) return -ENOMEM; params.num_elems = p_params->num_r2tq_pages_in_ring * QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe); params.elem_size = sizeof(struct nvmetcp_wqe); rc = 
qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params); if (rc) goto nomem_r2tq; params.num_elems = p_params->num_uhq_pages_in_ring * QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe); params.elem_size = sizeof(struct iscsi_uhqe); rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params); if (rc) goto nomem_uhq; params.elem_size = sizeof(struct iscsi_xhqe); rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params); if (rc) goto nomem; p_conn->free_on_delete = true; *p_out_conn = p_conn; return 0; nomem: qed_chain_free(p_hwfn->cdev, &p_conn->uhq); nomem_uhq: qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); nomem_r2tq: kfree(p_conn); return -ENOMEM; } static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn, struct qed_nvmetcp_conn **p_out_conn) { struct qed_nvmetcp_conn *p_conn = NULL; int rc = 0; u32 icid; spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock); rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid); spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); if (rc) return rc; rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn); if (rc) { spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock); qed_cxt_release_cid(p_hwfn, icid); spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); return rc; } p_conn->icid = icid; p_conn->conn_id = (u16)icid; p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; *p_out_conn = p_conn; return rc; } static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn, struct qed_nvmetcp_conn *p_conn) { spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock); list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list); qed_cxt_release_cid(p_hwfn, p_conn->icid); spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); } static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn, struct qed_nvmetcp_conn *p_conn) { qed_chain_free(p_hwfn->cdev, &p_conn->xhq); qed_chain_free(p_hwfn->cdev, &p_conn->uhq); qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); kfree(p_conn); } int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn) { struct qed_nvmetcp_info *p_nvmetcp_info; p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL); if (!p_nvmetcp_info) return -ENOMEM; INIT_LIST_HEAD(&p_nvmetcp_info->free_list); p_hwfn->p_nvmetcp_info = p_nvmetcp_info; return 0; } void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn) { spin_lock_init(&p_hwfn->p_nvmetcp_info->lock); } void qed_nvmetcp_free(struct qed_hwfn *p_hwfn) { struct qed_nvmetcp_conn *p_conn = NULL; if (!p_hwfn->p_nvmetcp_info) return; while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) { p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list, struct qed_nvmetcp_conn, list_entry); if (p_conn) { list_del(&p_conn->list_entry); qed_nvmetcp_free_connection(p_hwfn, p_conn); } } kfree(p_hwfn->p_nvmetcp_info); p_hwfn->p_nvmetcp_info = NULL; } static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev, u32 *handle, u32 *fw_cid, void __iomem **p_doorbell) { struct qed_hash_nvmetcp_con *hash_con; int rc; /* Allocate a hashed connection */ hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC); if (!hash_con) return -ENOMEM; /* Acquire the connection */ rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev), &hash_con->con); if (rc) { DP_NOTICE(cdev, "Failed to acquire Connection\n"); kfree(hash_con); return rc; } /* Added the connection to hash table */ *handle = hash_con->con->icid; *fw_cid = hash_con->con->fw_cid; hash_add(cdev->connections, &hash_con->node, *handle); if (p_doorbell) *p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev), *handle); return 0; } static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle) { struct 
qed_hash_nvmetcp_con *hash_con; hash_con = qed_nvmetcp_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } hlist_del(&hash_con->node); qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con); kfree(hash_con); return 0; } static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle, struct qed_nvmetcp_params_offload *conn_info) { struct qed_hash_nvmetcp_con *hash_con; struct qed_nvmetcp_conn *con; hash_con = qed_nvmetcp_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } /* Update the connection with information from the params */ con = hash_con->con; /* FW initializations */ con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE; con->sq_pbl_addr = conn_info->sq_pbl_addr; con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range; con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr; con->default_cq = conn_info->default_cq; SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0); SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1); SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1); /* Networking and TCP stack initializations */ ether_addr_copy(con->local_mac, conn_info->src.mac); ether_addr_copy(con->remote_mac, conn_info->dst.mac); memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip)); memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip)); con->local_port = conn_info->src.port; con->remote_port = conn_info->dst.port; con->vlan_id = conn_info->vlan_id; if (conn_info->timestamp_en) SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1); if (conn_info->delayed_ack_en) SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1); if (conn_info->tcp_keep_alive_en) SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1); if (conn_info->ecn_en) SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1); con->ip_version = conn_info->ip_version; con->flow_label = QED_TCP_FLOW_LABEL; con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt; con->ka_timeout = conn_info->ka_timeout; con->ka_interval = conn_info->ka_interval; con->max_rt_time = conn_info->max_rt_time; con->ttl = conn_info->ttl; con->tos_or_tc = conn_info->tos_or_tc; con->mss = conn_info->mss; con->cwnd = conn_info->cwnd; con->rcv_wnd_scale = conn_info->rcv_wnd_scale; con->connect_mode = 0; return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con, QED_SPQ_MODE_EBLOCK, NULL); } static int qed_nvmetcp_update_conn(struct qed_dev *cdev, u32 handle, struct qed_nvmetcp_params_update *conn_info) { struct qed_hash_nvmetcp_con *hash_con; struct qed_nvmetcp_conn *con; hash_con = qed_nvmetcp_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } /* Update the connection with information from the params */ con = hash_con->con; SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0); SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1); if (conn_info->hdr_digest_en) SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1); if (conn_info->data_digest_en) SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1); /* Placeholder - initialize pfv, cpda, hpda */ con->max_seq_size = conn_info->max_io_size; con->max_recv_pdu_length = conn_info->max_recv_pdu_length; con->max_send_pdu_length = conn_info->max_send_pdu_length; 
con->first_seq_length = conn_info->max_io_size; return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con, QED_SPQ_MODE_EBLOCK, NULL); } static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle) { struct qed_hash_nvmetcp_con *hash_con; hash_con = qed_nvmetcp_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con, QED_SPQ_MODE_EBLOCK, NULL); } static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev, u32 handle, u8 abrt_conn) { struct qed_hash_nvmetcp_con *hash_con; hash_con = qed_nvmetcp_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } hash_con->con->abortive_dsconnect = abrt_conn; return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con, QED_SPQ_MODE_EBLOCK, NULL); } static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = { .common = &qed_common_ops_pass, .ll2 = &qed_ll2_ops_pass, .fill_dev_info = &qed_fill_nvmetcp_dev_info, .register_ops = &qed_register_nvmetcp_ops, .start = &qed_nvmetcp_start, .stop = &qed_nvmetcp_stop, .acquire_conn = &qed_nvmetcp_acquire_conn, .release_conn = &qed_nvmetcp_release_conn, .offload_conn = &qed_nvmetcp_offload_conn, .update_conn = &qed_nvmetcp_update_conn, .destroy_conn = &qed_nvmetcp_destroy_conn, .clear_sq = &qed_nvmetcp_clear_conn_sq, .add_src_tcp_port_filter = &qed_llh_add_src_tcp_port_filter, .remove_src_tcp_port_filter = &qed_llh_remove_src_tcp_port_filter, .add_dst_tcp_port_filter = &qed_llh_add_dst_tcp_port_filter, .remove_dst_tcp_port_filter = &qed_llh_remove_dst_tcp_port_filter, .clear_all_filters = &qed_llh_clear_all_filters, .init_read_io = &init_nvmetcp_host_read_task, .init_write_io = &init_nvmetcp_host_write_task, .init_icreq_exchange = &init_nvmetcp_init_conn_req_task, .init_task_cleanup = &init_cleanup_task_nvmetcp }; const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void) { return &qed_nvmetcp_ops_pass; } EXPORT_SYMBOL(qed_get_nvmetcp_ops); void qed_put_nvmetcp_ops(void) { } EXPORT_SYMBOL(qed_put_nvmetcp_ops);
linux-master
drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
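The connection-management half of qed_nvmetcp.c above follows a recycle-don't-free pattern: qed_nvmetcp_acquire_connection() first tries to pop an entry off a lock-protected free list and only allocates a new connection (and its R2TQ/UHQ/XHQ chains) when that list is empty, while qed_nvmetcp_release_connection() puts the connection back on the list instead of freeing it. The standalone C program below is a minimal sketch of just that pool pattern, under stated assumptions: the names (conn_pool, pool_acquire, pool_release) are invented for illustration, a pthread mutex stands in for the driver's spinlock, and a plain counter stands in for the CID that the driver gets from qed_cxt_acquire_cid().

/* Toy model of the acquire/release free-list pattern; not a kernel API. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
        unsigned int cid;       /* stands in for the driver's icid */
        struct conn *next;      /* free-list linkage */
};

struct conn_pool {
        pthread_mutex_t lock;   /* plays the role of p_nvmetcp_info->lock */
        struct conn *free_list;
        unsigned int next_cid;
};

static struct conn_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Reuse a recycled connection if one exists, otherwise allocate a new one. */
static struct conn *pool_acquire(struct conn_pool *p)
{
        struct conn *c = NULL;

        pthread_mutex_lock(&p->lock);
        if (p->free_list) {
                c = p->free_list;
                p->free_list = c->next;
        }
        pthread_mutex_unlock(&p->lock);
        if (c)
                return c;

        c = calloc(1, sizeof(*c));
        if (!c)
                return NULL;
        pthread_mutex_lock(&p->lock);
        c->cid = p->next_cid++; /* the real driver gets this from qed_cxt_acquire_cid() */
        pthread_mutex_unlock(&p->lock);
        return c;
}

/* Return the connection to the free list instead of freeing it. */
static void pool_release(struct conn_pool *p, struct conn *c)
{
        pthread_mutex_lock(&p->lock);
        c->next = p->free_list;
        p->free_list = c;
        pthread_mutex_unlock(&p->lock);
}

int main(void)
{
        struct conn *a, *b;

        a = pool_acquire(&pool);
        if (!a)
                return 1;
        printf("first acquire: cid %u\n", a->cid);
        pool_release(&pool, a);

        b = pool_acquire(&pool);
        printf("second acquire reuses cid %u\n", b->cid);
        return 0;
}

Built with "cc demo.c -lpthread", the second acquire prints the same cid as the first, which is the behaviour the driver relies on so a released connection's already-allocated DMA chains can be reused rather than rebuilt.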
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> #include "qed.h" #include "qed_iscsi.h" #include "qed_ll2.h" #include "qed_ooo.h" #include "qed_cxt.h" #include "qed_nvmetcp.h" static struct qed_ooo_archipelago *qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid) { u32 idx = (cid & 0xffff) - p_ooo_info->cid_base; struct qed_ooo_archipelago *p_archipelago; if (unlikely(idx >= p_ooo_info->max_num_archipelagos)) return NULL; p_archipelago = &p_ooo_info->p_archipelagos_mem[idx]; if (unlikely(list_empty(&p_archipelago->isles_list))) return NULL; return p_archipelago; } static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid, u8 isle) { struct qed_ooo_archipelago *p_archipelago = NULL; struct qed_ooo_isle *p_isle = NULL; u8 the_num_of_isle = 1; p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); if (unlikely(!p_archipelago)) { DP_NOTICE(p_hwfn, "Connection %d is not found in OOO list\n", cid); return NULL; } list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) { if (the_num_of_isle == isle) return p_isle; the_num_of_isle++; } return NULL; } void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, struct ooo_opaque *p_cqe) { struct qed_ooo_history *p_history = &p_ooo_info->ooo_history; if (p_history->head_idx == p_history->num_of_cqes) p_history->head_idx = 0; p_history->p_cqes[p_history->head_idx] = *p_cqe; p_history->head_idx++; } int qed_ooo_alloc(struct qed_hwfn *p_hwfn) { u16 max_num_archipelagos = 0, cid_base; struct qed_ooo_info *p_ooo_info; enum protocol_type proto; u16 max_num_isles = 0; u32 i; switch (p_hwfn->hw_info.personality) { case QED_PCI_ISCSI: case QED_PCI_NVMETCP: proto = PROTOCOLID_TCP_ULP; break; case QED_PCI_ETH_RDMA: case QED_PCI_ETH_IWARP: proto = PROTOCOLID_IWARP; break; default: DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info: unknown personality\n"); return -EINVAL; } max_num_archipelagos = (u16)qed_cxt_get_proto_cid_count(p_hwfn, proto, NULL); max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos; cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, proto); if (!max_num_archipelagos) { DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info: unknown amount of connections\n"); return -EINVAL; } p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL); if (!p_ooo_info) return -ENOMEM; p_ooo_info->cid_base = cid_base; p_ooo_info->max_num_archipelagos = max_num_archipelagos; INIT_LIST_HEAD(&p_ooo_info->free_buffers_list); INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list); INIT_LIST_HEAD(&p_ooo_info->free_isles_list); p_ooo_info->p_isles_mem = kcalloc(max_num_isles, sizeof(struct qed_ooo_isle), GFP_KERNEL); if (!p_ooo_info->p_isles_mem) goto no_isles_mem; for (i = 0; i < max_num_isles; i++) { INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list); list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry, &p_ooo_info->free_isles_list); } p_ooo_info->p_archipelagos_mem = kcalloc(max_num_archipelagos, sizeof(struct qed_ooo_archipelago), GFP_KERNEL); if (!p_ooo_info->p_archipelagos_mem) goto no_archipelagos_mem; for (i = 0; i < max_num_archipelagos; i++) INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list); 
p_ooo_info->ooo_history.p_cqes = kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES, sizeof(struct ooo_opaque), GFP_KERNEL); if (!p_ooo_info->ooo_history.p_cqes) goto no_history_mem; p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES; p_hwfn->p_ooo_info = p_ooo_info; return 0; no_history_mem: kfree(p_ooo_info->p_archipelagos_mem); no_archipelagos_mem: kfree(p_ooo_info->p_isles_mem); no_isles_mem: kfree(p_ooo_info); return -ENOMEM; } void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid) { struct qed_ooo_archipelago *p_archipelago; struct qed_ooo_buffer *p_buffer; struct qed_ooo_isle *p_isle; p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); if (!p_archipelago) return; while (!list_empty(&p_archipelago->isles_list)) { p_isle = list_first_entry(&p_archipelago->isles_list, struct qed_ooo_isle, list_entry); list_del(&p_isle->list_entry); while (!list_empty(&p_isle->buffers_list)) { p_buffer = list_first_entry(&p_isle->buffers_list, struct qed_ooo_buffer, list_entry); if (!p_buffer) break; list_move_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list); } list_add_tail(&p_isle->list_entry, &p_ooo_info->free_isles_list); } } void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) { struct qed_ooo_archipelago *p_archipelago; struct qed_ooo_buffer *p_buffer; struct qed_ooo_isle *p_isle; u32 i; for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) { p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]); while (!list_empty(&p_archipelago->isles_list)) { p_isle = list_first_entry(&p_archipelago->isles_list, struct qed_ooo_isle, list_entry); list_del(&p_isle->list_entry); while (!list_empty(&p_isle->buffers_list)) { p_buffer = list_first_entry(&p_isle->buffers_list, struct qed_ooo_buffer, list_entry); if (!p_buffer) break; list_move_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list); } list_add_tail(&p_isle->list_entry, &p_ooo_info->free_isles_list); } } if (!list_empty(&p_ooo_info->ready_buffers_list)) list_splice_tail_init(&p_ooo_info->ready_buffers_list, &p_ooo_info->free_buffers_list); } void qed_ooo_setup(struct qed_hwfn *p_hwfn) { qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); memset(p_hwfn->p_ooo_info->ooo_history.p_cqes, 0, p_hwfn->p_ooo_info->ooo_history.num_of_cqes * sizeof(struct ooo_opaque)); p_hwfn->p_ooo_info->ooo_history.head_idx = 0; } void qed_ooo_free(struct qed_hwfn *p_hwfn) { struct qed_ooo_info *p_ooo_info = p_hwfn->p_ooo_info; struct qed_ooo_buffer *p_buffer; if (!p_ooo_info) return; qed_ooo_release_all_isles(p_hwfn, p_ooo_info); while (!list_empty(&p_ooo_info->free_buffers_list)) { p_buffer = list_first_entry(&p_ooo_info->free_buffers_list, struct qed_ooo_buffer, list_entry); if (!p_buffer) break; list_del(&p_buffer->list_entry); dma_free_coherent(&p_hwfn->cdev->pdev->dev, p_buffer->rx_buffer_size, p_buffer->rx_buffer_virt_addr, p_buffer->rx_buffer_phys_addr); kfree(p_buffer); } kfree(p_ooo_info->p_isles_mem); kfree(p_ooo_info->p_archipelagos_mem); kfree(p_ooo_info->ooo_history.p_cqes); kfree(p_ooo_info); p_hwfn->p_ooo_info = NULL; } void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, struct qed_ooo_buffer *p_buffer) { list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list); } struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) { struct qed_ooo_buffer *p_buffer = NULL; if (!list_empty(&p_ooo_info->free_buffers_list)) { p_buffer = 
list_first_entry(&p_ooo_info->free_buffers_list, struct qed_ooo_buffer, list_entry); list_del(&p_buffer->list_entry); } return p_buffer; } void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, struct qed_ooo_buffer *p_buffer, u8 on_tail) { if (on_tail) list_add_tail(&p_buffer->list_entry, &p_ooo_info->ready_buffers_list); else list_add(&p_buffer->list_entry, &p_ooo_info->ready_buffers_list); } struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info) { struct qed_ooo_buffer *p_buffer = NULL; if (!list_empty(&p_ooo_info->ready_buffers_list)) { p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list, struct qed_ooo_buffer, list_entry); list_del(&p_buffer->list_entry); } return p_buffer; } void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid, u8 drop_isle, u8 drop_size) { struct qed_ooo_isle *p_isle = NULL; u8 isle_idx; for (isle_idx = 0; isle_idx < drop_size; isle_idx++) { p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle); if (!p_isle) { DP_NOTICE(p_hwfn, "Isle %d is not found(cid %d)\n", drop_isle, cid); return; } if (list_empty(&p_isle->buffers_list)) DP_NOTICE(p_hwfn, "Isle %d is empty(cid %d)\n", drop_isle, cid); else list_splice_tail_init(&p_isle->buffers_list, &p_ooo_info->free_buffers_list); list_del(&p_isle->list_entry); p_ooo_info->cur_isles_number--; list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list); } } void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid, u8 ooo_isle, struct qed_ooo_buffer *p_buffer) { struct qed_ooo_archipelago *p_archipelago = NULL; struct qed_ooo_isle *p_prev_isle = NULL; struct qed_ooo_isle *p_isle = NULL; if (ooo_isle > 1) { p_prev_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle - 1); if (unlikely(!p_prev_isle)) { DP_NOTICE(p_hwfn, "Isle %d is not found(cid %d)\n", ooo_isle - 1, cid); return; } } p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); if (unlikely(!p_archipelago && ooo_isle != 1)) { DP_NOTICE(p_hwfn, "Connection %d is not found in OOO list\n", cid); return; } if (!list_empty(&p_ooo_info->free_isles_list)) { p_isle = list_first_entry(&p_ooo_info->free_isles_list, struct qed_ooo_isle, list_entry); list_del(&p_isle->list_entry); if (unlikely(!list_empty(&p_isle->buffers_list))) { DP_NOTICE(p_hwfn, "Free isle is not empty\n"); INIT_LIST_HEAD(&p_isle->buffers_list); } } else { DP_NOTICE(p_hwfn, "No more free isles\n"); return; } if (!p_archipelago) { u32 idx = (cid & 0xffff) - p_ooo_info->cid_base; p_archipelago = &p_ooo_info->p_archipelagos_mem[idx]; } list_add(&p_buffer->list_entry, &p_isle->buffers_list); p_ooo_info->cur_isles_number++; p_ooo_info->gen_isles_number++; if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number) p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number; if (!p_prev_isle) list_add(&p_isle->list_entry, &p_archipelago->isles_list); else list_add(&p_isle->list_entry, &p_prev_isle->list_entry); } void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid, u8 ooo_isle, struct qed_ooo_buffer *p_buffer, u8 buffer_side) { struct qed_ooo_isle *p_isle = NULL; p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle); if (unlikely(!p_isle)) { DP_NOTICE(p_hwfn, "Isle %d is not found(cid %d)\n", ooo_isle, cid); return; } if (unlikely(buffer_side == QED_OOO_LEFT_BUF)) list_add(&p_buffer->list_entry, &p_isle->buffers_list); else list_add_tail(&p_buffer->list_entry, 
&p_isle->buffers_list); } void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle) { struct qed_ooo_isle *p_right_isle = NULL; struct qed_ooo_isle *p_left_isle = NULL; p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, left_isle + 1); if (unlikely(!p_right_isle)) { DP_NOTICE(p_hwfn, "Right isle %d is not found(cid %d)\n", left_isle + 1, cid); return; } list_del(&p_right_isle->list_entry); p_ooo_info->cur_isles_number--; if (left_isle) { p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, left_isle); if (unlikely(!p_left_isle)) { DP_NOTICE(p_hwfn, "Left isle %d is not found(cid %d)\n", left_isle, cid); return; } list_splice_tail_init(&p_right_isle->buffers_list, &p_left_isle->buffers_list); } else { list_splice_tail_init(&p_right_isle->buffers_list, &p_ooo_info->ready_buffers_list); } list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list); }
linux-master
drivers/net/ethernet/qlogic/qed/qed_ooo.c
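qed_ooo.c above tracks TCP out-of-order data per connection as an "archipelago" of "isles": each isle is an ordered list of buffers for one contiguous run of out-of-order data, qed_ooo_add_new_isle() opens a new isle at a given position, and qed_ooo_join_isles() splices the buffers of isle N+1 onto isle N, or onto the ready-buffers list when N is 0 (meaning the data has become contiguous with the already-delivered stream). The toy program below sketches only that bookkeeping; it uses plain malloc'd singly linked lists and invented names rather than the kernel list API or the driver's structures, and it ignores the free-isle pool and the CQE history buffer.

/* Toy model of isle/archipelago bookkeeping; names are illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct buf { int id; struct buf *next; };
struct isle { struct buf *bufs; struct isle *next; };

static struct isle *isles;      /* one connection's isle list, positions count from 1 */
static struct buf *ready;       /* in-order buffers ready for delivery */

/* Return the pos'th isle (1-based), or NULL if it does not exist. */
static struct isle *seek_isle(int pos)
{
        struct isle *i = isles;

        while (i && --pos)
                i = i->next;
        return i;
}

/* Append a buffer at the tail of a list, preserving order. */
static void buf_append(struct buf **list, struct buf *b)
{
        while (*list)
                list = &(*list)->next;
        b->next = NULL;
        *list = b;
}

/* Open a new isle holding one buffer at position pos. */
static void add_new_isle(int pos, int id)
{
        struct isle *i = calloc(1, sizeof(*i));
        struct buf *b = calloc(1, sizeof(*b));
        struct isle *prev = pos > 1 ? seek_isle(pos - 1) : NULL;

        if (!i || !b)
                exit(1);
        b->id = id;
        buf_append(&i->bufs, b);
        if (prev) {
                i->next = prev->next;
                prev->next = i;
        } else {
                i->next = isles;
                isles = i;
        }
}

/* Merge isle (left_pos + 1) into isle left_pos; left_pos == 0 sends the
 * buffers to the ready list because the data is now in order. */
static void join_isles(int left_pos)
{
        struct isle *left = left_pos ? seek_isle(left_pos) : NULL;
        struct isle *right = left ? left->next : isles;
        struct buf **dst = left ? &left->bufs : &ready;
        struct buf *b;

        if (!right)             /* mirrors the driver's "isle not found" notice */
                return;
        while ((b = right->bufs)) {
                right->bufs = b->next;
                buf_append(dst, b);
        }
        if (left)
                left->next = right->next;
        else
                isles = right->next;
        free(right);
}

int main(void)
{
        struct buf *b;

        add_new_isle(1, 3);     /* first out-of-order segment opens isle 1 */
        add_new_isle(2, 7);     /* a later gap opens isle 2                */
        join_isles(1);          /* isles 1 and 2 became contiguous         */
        join_isles(0);          /* isle 1 became contiguous with delivered data */
        for (b = ready; b; b = b->next)
                printf("ready buffer %d\n", b->id);
        return 0;
}

After both joins the program prints the two buffers in delivery order, which is the end state the driver reaches via list_splice_tail_init() onto ready_buffers_list.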
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> #include <asm/byteorder.h> #include <asm/param.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/workqueue.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/qed/qed_iscsi_if.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_iro_hsi.h" #include "qed_iscsi.h" #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_sp.h" #include "qed_sriov.h" #include "qed_reg_addr.h" struct qed_iscsi_conn { struct list_head list_entry; bool free_on_delete; u16 conn_id; u32 icid; u32 fw_cid; u8 layer_code; u8 offl_flags; u8 connect_mode; u32 initial_ack; dma_addr_t sq_pbl_addr; struct qed_chain r2tq; struct qed_chain xhq; struct qed_chain uhq; struct tcp_upload_params *tcp_upload_params_virt_addr; dma_addr_t tcp_upload_params_phys_addr; struct scsi_terminate_extra_params *queue_cnts_virt_addr; dma_addr_t queue_cnts_phys_addr; dma_addr_t syn_phy_addr; u16 syn_ip_payload_length; u8 local_mac[6]; u8 remote_mac[6]; u16 vlan_id; u16 tcp_flags; u8 ip_version; u32 remote_ip[4]; u32 local_ip[4]; u8 ka_max_probe_cnt; u8 dup_ack_theshold; u32 rcv_next; u32 snd_una; u32 snd_next; u32 snd_max; u32 snd_wnd; u32 rcv_wnd; u32 snd_wl1; u32 cwnd; u32 ss_thresh; u16 srtt; u16 rtt_var; u32 ts_recent; u32 ts_recent_age; u32 total_rt; u32 ka_timeout_delta; u32 rt_timeout_delta; u8 dup_ack_cnt; u8 snd_wnd_probe_cnt; u8 ka_probe_cnt; u8 rt_cnt; u32 flow_label; u32 ka_timeout; u32 ka_interval; u32 max_rt_time; u32 initial_rcv_wnd; u8 ttl; u8 tos_or_tc; u16 remote_port; u16 local_port; u16 mss; u8 snd_wnd_scale; u8 rcv_wnd_scale; u16 da_timeout_value; u8 ack_frequency; u8 update_flag; u8 default_cq; u32 max_seq_size; u32 max_recv_pdu_length; u32 max_send_pdu_length; u32 first_seq_length; u32 exp_stat_sn; u32 stat_sn; u16 physical_q0; u16 physical_q1; u8 abortive_dsconnect; }; static int qed_iscsi_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, __le16 echo, union event_ring_data *data, u8 fw_return_code) { if (p_hwfn->p_iscsi_info->event_cb) { struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; return p_iscsi->event_cb(p_iscsi->event_context, fw_event_code, data); } else { DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n"); return -EINVAL; } } static int qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr, void *event_context, iscsi_event_cb_t async_event_cb) { struct iscsi_init_ramrod_params *p_ramrod = NULL; struct scsi_init_func_queues *p_queue = NULL; struct qed_iscsi_pf_params *p_params = NULL; struct iscsi_spe_func_init *p_init = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = 0; u32 dval; u16 val; u8 i; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, ISCSI_RAMROD_CMD_ID_INIT_FUNC, PROTOCOLID_TCP_ULP, 
&init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.iscsi_init; p_init = &p_ramrod->iscsi_init_spe; p_params = &p_hwfn->pf_params.iscsi_pf_params; p_queue = &p_init->q_params; /* Sanity */ if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) { DP_ERR(p_hwfn, "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", p_params->num_queues, p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); qed_sp_destroy_request(p_hwfn, p_ent); return -EINVAL; } val = p_params->half_way_close_timeout; p_init->half_way_close_timeout = cpu_to_le16(val); p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + p_params->ll2_ooo_queue_id; p_init->func_params.log_page_size = p_params->log_page_size; val = p_params->num_tasks; p_init->func_params.num_tasks = cpu_to_le16(val); p_init->debug_mode.flags = p_params->debug_mode; DMA_REGPAIR_LE(p_queue->glbl_q_params_addr, p_params->glbl_q_params_addr); val = p_params->cq_num_entries; p_queue->cq_num_entries = cpu_to_le16(val); val = p_params->cmdq_num_entries; p_queue->cmdq_num_entries = cpu_to_le16(val); p_queue->num_queues = p_params->num_queues; dval = (u8)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; p_queue->queue_relative_offset = (u8)dval; p_queue->cq_sb_pi = p_params->gl_rq_pi; p_queue->cmdq_sb_pi = p_params->gl_cmd_pi; for (i = 0; i < p_params->num_queues; i++) { val = qed_get_igu_sb_id(p_hwfn, i); p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); } p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ); DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ], p_params->bdq_pbl_base_addr[BDQ_ID_RQ]); p_queue->bdq_pbl_num_entries[BDQ_ID_RQ] = p_params->bdq_pbl_num_entries[BDQ_ID_RQ]; val = p_params->bdq_xoff_threshold[BDQ_ID_RQ]; p_queue->bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(val); val = p_params->bdq_xon_threshold[BDQ_ID_RQ]; p_queue->bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(val); DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_IMM_DATA], p_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = p_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; val = p_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]; p_queue->bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val); val = p_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]; p_queue->bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val); val = p_params->rq_buffer_size; p_queue->rq_buffer_size = cpu_to_le16(val); if (p_params->is_target) { SET_FIELD(p_queue->q_validity, SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); if (p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]) SET_FIELD(p_queue->q_validity, SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1); SET_FIELD(p_queue->q_validity, SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1); } else { SET_FIELD(p_queue->q_validity, SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); } p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer); val = p_params->tx_sws_timer; p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val); p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt; p_hwfn->p_iscsi_info->event_context = event_context; p_hwfn->p_iscsi_info->event_cb = async_event_cb; qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP, qed_iscsi_async_event); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn, enum spq_mode comp_mode, struct 
qed_spq_comp_cb *p_comp_addr) { struct iscsi_spe_conn_offload *p_ramrod = NULL; struct tcp_offload_params_opt2 *p_tcp2 = NULL; struct tcp_offload_params *p_tcp = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; dma_addr_t r2tq_pbl_addr; dma_addr_t xhq_pbl_addr; dma_addr_t uhq_pbl_addr; u16 physical_q; __le16 tmp; int rc = 0; u32 dval; u16 wval; u16 *p; u8 i; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.iscsi_conn_offload; /* Transmission PQ is the first of the PF */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); p_conn->physical_q0 = physical_q; p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q); /* iSCSI Pure-ACK PQ */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); p_conn->physical_q1 = physical_q; p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q); p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr); r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq); DMA_REGPAIR_LE(p_ramrod->iscsi.r2tq_pbl_addr, r2tq_pbl_addr); xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq); DMA_REGPAIR_LE(p_ramrod->iscsi.xhq_pbl_addr, xhq_pbl_addr); uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq); DMA_REGPAIR_LE(p_ramrod->iscsi.uhq_pbl_addr, uhq_pbl_addr); p_ramrod->iscsi.initial_ack = cpu_to_le32(p_conn->initial_ack); p_ramrod->iscsi.flags = p_conn->offl_flags; p_ramrod->iscsi.default_cq = p_conn->default_cq; p_ramrod->iscsi.stat_sn = cpu_to_le32(p_conn->stat_sn); if (!GET_FIELD(p_ramrod->iscsi.flags, ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B)) { p_tcp = &p_ramrod->tcp; p = (u16 *)p_conn->local_mac; tmp = cpu_to_le16(get_unaligned_be16(p)); p_tcp->local_mac_addr_hi = tmp; tmp = cpu_to_le16(get_unaligned_be16(p + 1)); p_tcp->local_mac_addr_mid = tmp; tmp = cpu_to_le16(get_unaligned_be16(p + 2)); p_tcp->local_mac_addr_lo = tmp; p = (u16 *)p_conn->remote_mac; tmp = cpu_to_le16(get_unaligned_be16(p)); p_tcp->remote_mac_addr_hi = tmp; tmp = cpu_to_le16(get_unaligned_be16(p + 1)); p_tcp->remote_mac_addr_mid = tmp; tmp = cpu_to_le16(get_unaligned_be16(p + 2)); p_tcp->remote_mac_addr_lo = tmp; p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); p_tcp->flags = cpu_to_le16(p_conn->tcp_flags); p_tcp->ip_version = p_conn->ip_version; for (i = 0; i < 4; i++) { dval = p_conn->remote_ip[i]; p_tcp->remote_ip[i] = cpu_to_le32(dval); dval = p_conn->local_ip[i]; p_tcp->local_ip[i] = cpu_to_le32(dval); } p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt; p_tcp->dup_ack_theshold = p_conn->dup_ack_theshold; p_tcp->rcv_next = cpu_to_le32(p_conn->rcv_next); p_tcp->snd_una = cpu_to_le32(p_conn->snd_una); p_tcp->snd_next = cpu_to_le32(p_conn->snd_next); p_tcp->snd_max = cpu_to_le32(p_conn->snd_max); p_tcp->snd_wnd = cpu_to_le32(p_conn->snd_wnd); p_tcp->rcv_wnd = cpu_to_le32(p_conn->rcv_wnd); p_tcp->snd_wl1 = cpu_to_le32(p_conn->snd_wl1); p_tcp->cwnd = cpu_to_le32(p_conn->cwnd); p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh); p_tcp->srtt = cpu_to_le16(p_conn->srtt); p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var); p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent); p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age); p_tcp->total_rt = cpu_to_le32(p_conn->total_rt); dval = p_conn->ka_timeout_delta; 
p_tcp->ka_timeout_delta = cpu_to_le32(dval); dval = p_conn->rt_timeout_delta; p_tcp->rt_timeout_delta = cpu_to_le32(dval); p_tcp->dup_ack_cnt = p_conn->dup_ack_cnt; p_tcp->snd_wnd_probe_cnt = p_conn->snd_wnd_probe_cnt; p_tcp->ka_probe_cnt = p_conn->ka_probe_cnt; p_tcp->rt_cnt = p_conn->rt_cnt; p_tcp->flow_label = cpu_to_le32(p_conn->flow_label); p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout); p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval); p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time); dval = p_conn->initial_rcv_wnd; p_tcp->initial_rcv_wnd = cpu_to_le32(dval); p_tcp->ttl = p_conn->ttl; p_tcp->tos_or_tc = p_conn->tos_or_tc; p_tcp->remote_port = cpu_to_le16(p_conn->remote_port); p_tcp->local_port = cpu_to_le16(p_conn->local_port); p_tcp->mss = cpu_to_le16(p_conn->mss); p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale; p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale; wval = p_conn->da_timeout_value; p_tcp->da_timeout_value = cpu_to_le16(wval); p_tcp->ack_frequency = p_conn->ack_frequency; p_tcp->connect_mode = p_conn->connect_mode; } else { p_tcp2 = &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp; p = (u16 *)p_conn->local_mac; tmp = cpu_to_le16(get_unaligned_be16(p)); p_tcp2->local_mac_addr_hi = tmp; tmp = cpu_to_le16(get_unaligned_be16(p + 1)); p_tcp2->local_mac_addr_mid = tmp; tmp = cpu_to_le16(get_unaligned_be16(p + 2)); p_tcp2->local_mac_addr_lo = tmp; p = (u16 *)p_conn->remote_mac; tmp = cpu_to_le16(get_unaligned_be16(p)); p_tcp2->remote_mac_addr_hi = tmp; tmp = cpu_to_le16(get_unaligned_be16(p + 1)); p_tcp2->remote_mac_addr_mid = tmp; tmp = cpu_to_le16(get_unaligned_be16(p + 2)); p_tcp2->remote_mac_addr_lo = tmp; p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id); p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags); p_tcp2->ip_version = p_conn->ip_version; for (i = 0; i < 4; i++) { dval = p_conn->remote_ip[i]; p_tcp2->remote_ip[i] = cpu_to_le32(dval); dval = p_conn->local_ip[i]; p_tcp2->local_ip[i] = cpu_to_le32(dval); } p_tcp2->flow_label = cpu_to_le32(p_conn->flow_label); p_tcp2->ttl = p_conn->ttl; p_tcp2->tos_or_tc = p_conn->tos_or_tc; p_tcp2->remote_port = cpu_to_le16(p_conn->remote_port); p_tcp2->local_port = cpu_to_le16(p_conn->local_port); p_tcp2->mss = cpu_to_le16(p_conn->mss); p_tcp2->rcv_wnd_scale = p_conn->rcv_wnd_scale; p_tcp2->connect_mode = p_conn->connect_mode; wval = p_conn->syn_ip_payload_length; p_tcp2->syn_ip_payload_length = cpu_to_le16(wval); p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr); p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr); p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd); p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt; p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout); p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time); p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval); } return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct iscsi_conn_update_ramrod_params *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc; u32 dval; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.iscsi_conn_update; p_ramrod->conn_id = 
cpu_to_le16(p_conn->conn_id); p_ramrod->flags = p_conn->update_flag; p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size); dval = p_conn->max_recv_pdu_length; p_ramrod->max_recv_pdu_length = cpu_to_le32(dval); dval = p_conn->max_send_pdu_length; p_ramrod->max_send_pdu_length = cpu_to_le32(dval); dval = p_conn->first_seq_length; p_ramrod->first_seq_length = cpu_to_le32(dval); p_ramrod->exp_stat_sn = cpu_to_le32(p_conn->exp_stat_sn); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct iscsi_spe_conn_mac_update *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; u8 ucval; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, ISCSI_RAMROD_CMD_ID_MAC_UPDATE, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update; p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); ucval = p_conn->remote_mac[1]; ((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval; ucval = p_conn->remote_mac[0]; ((u8 *)(&p_ramrod->remote_mac_addr_hi))[1] = ucval; ucval = p_conn->remote_mac[3]; ((u8 *)(&p_ramrod->remote_mac_addr_mid))[0] = ucval; ucval = p_conn->remote_mac[2]; ((u8 *)(&p_ramrod->remote_mac_addr_mid))[1] = ucval; ucval = p_conn->remote_mac[5]; ((u8 *)(&p_ramrod->remote_mac_addr_lo))[0] = ucval; ucval = p_conn->remote_mac[4]; ((u8 *)(&p_ramrod->remote_mac_addr_lo))[1] = ucval; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct iscsi_spe_conn_termination *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, ISCSI_RAMROD_CMD_ID_TERMINATION_CONN, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; p_ramrod = &p_ent->ramrod.iscsi_conn_terminate; p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); p_ramrod->abortive = p_conn->abortive_dsconnect; DMA_REGPAIR_LE(p_ramrod->query_params_addr, p_conn->tcp_upload_params_phys_addr); DMA_REGPAIR_LE(p_ramrod->queue_cnts_addr, p_conn->queue_cnts_phys_addr); return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_conn->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, ISCSI_RAMROD_CMD_ID_CLEAR_SQ, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr) { struct qed_spq_entry *p_ent = NULL; struct 
qed_sp_init_data init_data; int rc = 0; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.comp_mode = comp_mode; init_data.p_comp_data = p_comp_addr; rc = qed_sp_init_request(p_hwfn, &p_ent, ISCSI_RAMROD_CMD_ID_DESTROY_FUNC, PROTOCOLID_TCP_ULP, &init_data); if (rc) return rc; rc = qed_spq_post(p_hwfn, p_ent, NULL); qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP); return rc; } static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) { return (u8 __iomem *)p_hwfn->doorbells + qed_db_addr(cid, DQ_DEMS_LEGACY); } static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, u8 bdq_id) { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, MSTORM_SCSI_BDQ_EXT_PROD, RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; } } static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, u8 bdq_id) { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, TSTORM_SCSI_BDQ_EXT_PROD, RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; } } static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn) { if (!p_conn->queue_cnts_virt_addr) goto nomem; memset(p_conn->queue_cnts_virt_addr, 0, sizeof(*p_conn->queue_cnts_virt_addr)); if (!p_conn->tcp_upload_params_virt_addr) goto nomem; memset(p_conn->tcp_upload_params_virt_addr, 0, sizeof(*p_conn->tcp_upload_params_virt_addr)); if (!p_conn->r2tq.p_virt_addr) goto nomem; qed_chain_pbl_zero_mem(&p_conn->r2tq); if (!p_conn->uhq.p_virt_addr) goto nomem; qed_chain_pbl_zero_mem(&p_conn->uhq); if (!p_conn->xhq.p_virt_addr) goto nomem; qed_chain_pbl_zero_mem(&p_conn->xhq); return 0; nomem: return -ENOMEM; } static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn **p_out_conn) { struct scsi_terminate_extra_params *p_q_cnts = NULL; struct qed_iscsi_pf_params *p_params = NULL; struct qed_chain_init_params params = { .mode = QED_CHAIN_MODE_PBL, .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, .cnt_type = QED_CHAIN_CNT_TYPE_U16, }; struct tcp_upload_params *p_tcp = NULL; struct qed_iscsi_conn *p_conn = NULL; int rc = 0; /* Try finding a free connection that can be used */ spin_lock_bh(&p_hwfn->p_iscsi_info->lock); if (!list_empty(&p_hwfn->p_iscsi_info->free_list)) p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list, struct qed_iscsi_conn, list_entry); if (p_conn) { list_del(&p_conn->list_entry); spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); *p_out_conn = p_conn; return 0; } spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); /* Need to allocate a new connection */ p_params = &p_hwfn->pf_params.iscsi_pf_params; p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); if (!p_conn) return -ENOMEM; p_q_cnts = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_q_cnts), &p_conn->queue_cnts_phys_addr, GFP_KERNEL); if (!p_q_cnts) goto nomem_queue_cnts_param; p_conn->queue_cnts_virt_addr = p_q_cnts; p_tcp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_tcp), &p_conn->tcp_upload_params_phys_addr, GFP_KERNEL); if (!p_tcp) goto nomem_upload_param; p_conn->tcp_upload_params_virt_addr = p_tcp; params.num_elems = p_params->num_r2tq_pages_in_ring * QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_wqe); params.elem_size = sizeof(struct iscsi_wqe); rc 
= qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params); if (rc) goto nomem_r2tq; params.num_elems = p_params->num_uhq_pages_in_ring * QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe); params.elem_size = sizeof(struct iscsi_uhqe); rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params); if (rc) goto nomem_uhq; params.elem_size = sizeof(struct iscsi_xhqe); rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params); if (rc) goto nomem; p_conn->free_on_delete = true; *p_out_conn = p_conn; return 0; nomem: qed_chain_free(p_hwfn->cdev, &p_conn->uhq); nomem_uhq: qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); nomem_r2tq: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct tcp_upload_params), p_conn->tcp_upload_params_virt_addr, p_conn->tcp_upload_params_phys_addr); nomem_upload_param: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct scsi_terminate_extra_params), p_conn->queue_cnts_virt_addr, p_conn->queue_cnts_phys_addr); nomem_queue_cnts_param: kfree(p_conn); return -ENOMEM; } static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_in_conn, struct qed_iscsi_conn **p_out_conn) { struct qed_iscsi_conn *p_conn = NULL; int rc = 0; u32 icid; spin_lock_bh(&p_hwfn->p_iscsi_info->lock); rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid); spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); if (rc) return rc; /* Use input connection or allocate a new one */ if (p_in_conn) p_conn = p_in_conn; else rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn); if (!rc) rc = qed_iscsi_setup_connection(p_conn); if (rc) { spin_lock_bh(&p_hwfn->p_iscsi_info->lock); qed_cxt_release_cid(p_hwfn, icid); spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); return rc; } p_conn->icid = icid; p_conn->conn_id = (u16)icid; p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; *p_out_conn = p_conn; return rc; } static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn) { spin_lock_bh(&p_hwfn->p_iscsi_info->lock); list_add_tail(&p_conn->list_entry, &p_hwfn->p_iscsi_info->free_list); qed_cxt_release_cid(p_hwfn, p_conn->icid); spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); } static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn) { qed_chain_free(p_hwfn->cdev, &p_conn->xhq); qed_chain_free(p_hwfn->cdev, &p_conn->uhq); qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct tcp_upload_params), p_conn->tcp_upload_params_virt_addr, p_conn->tcp_upload_params_phys_addr); dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(struct scsi_terminate_extra_params), p_conn->queue_cnts_virt_addr, p_conn->queue_cnts_phys_addr); kfree(p_conn); } int qed_iscsi_alloc(struct qed_hwfn *p_hwfn) { struct qed_iscsi_info *p_iscsi_info; p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL); if (!p_iscsi_info) return -ENOMEM; INIT_LIST_HEAD(&p_iscsi_info->free_list); p_hwfn->p_iscsi_info = p_iscsi_info; return 0; } void qed_iscsi_setup(struct qed_hwfn *p_hwfn) { spin_lock_init(&p_hwfn->p_iscsi_info->lock); } void qed_iscsi_free(struct qed_hwfn *p_hwfn) { struct qed_iscsi_conn *p_conn = NULL; if (!p_hwfn->p_iscsi_info) return; while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) { p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list, struct qed_iscsi_conn, list_entry); if (p_conn) { list_del(&p_conn->list_entry); qed_iscsi_free_connection(p_hwfn, p_conn); } } kfree(p_hwfn->p_iscsi_info); p_hwfn->p_iscsi_info = NULL; } static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn, struct 
qed_ptt *p_ptt, struct qed_iscsi_stats *p_stats) { struct tstorm_iscsi_stats_drv tstats; u32 tstats_addr; memset(&tstats, 0, sizeof(tstats)); tstats_addr = BAR0_MAP_REG_TSDM_RAM + TSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); p_stats->iscsi_rx_bytes_cnt = HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt); p_stats->iscsi_rx_packet_cnt = HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt); p_stats->iscsi_rx_new_ooo_isle_events_cnt = HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt); p_stats->iscsi_cmdq_threshold_cnt = le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt); p_stats->iscsi_rq_threshold_cnt = le32_to_cpu(tstats.iscsi_rq_threshold_cnt); p_stats->iscsi_immq_threshold_cnt = le32_to_cpu(tstats.iscsi_immq_threshold_cnt); } static void _qed_iscsi_get_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_iscsi_stats *p_stats) { struct mstorm_iscsi_stats_drv mstats; u32 mstats_addr; memset(&mstats, 0, sizeof(mstats)); mstats_addr = BAR0_MAP_REG_MSDM_RAM + MSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, sizeof(mstats)); p_stats->iscsi_rx_dropped_pdus_task_not_valid = HILO_64_REGPAIR(mstats.iscsi_rx_dropped_pdus_task_not_valid); } static void _qed_iscsi_get_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_iscsi_stats *p_stats) { struct ustorm_iscsi_stats_drv ustats; u32 ustats_addr; memset(&ustats, 0, sizeof(ustats)); ustats_addr = BAR0_MAP_REG_USDM_RAM + USTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats)); p_stats->iscsi_rx_data_pdu_cnt = HILO_64_REGPAIR(ustats.iscsi_rx_data_pdu_cnt); p_stats->iscsi_rx_r2t_pdu_cnt = HILO_64_REGPAIR(ustats.iscsi_rx_r2t_pdu_cnt); p_stats->iscsi_rx_total_pdu_cnt = HILO_64_REGPAIR(ustats.iscsi_rx_total_pdu_cnt); } static void _qed_iscsi_get_xstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_iscsi_stats *p_stats) { struct xstorm_iscsi_stats_drv xstats; u32 xstats_addr; memset(&xstats, 0, sizeof(xstats)); xstats_addr = BAR0_MAP_REG_XSDM_RAM + XSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); qed_memcpy_from(p_hwfn, p_ptt, &xstats, xstats_addr, sizeof(xstats)); p_stats->iscsi_tx_go_to_slow_start_event_cnt = HILO_64_REGPAIR(xstats.iscsi_tx_go_to_slow_start_event_cnt); p_stats->iscsi_tx_fast_retransmit_event_cnt = HILO_64_REGPAIR(xstats.iscsi_tx_fast_retransmit_event_cnt); } static void _qed_iscsi_get_ystats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_iscsi_stats *p_stats) { struct ystorm_iscsi_stats_drv ystats; u32 ystats_addr; memset(&ystats, 0, sizeof(ystats)); ystats_addr = BAR0_MAP_REG_YSDM_RAM + YSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); qed_memcpy_from(p_hwfn, p_ptt, &ystats, ystats_addr, sizeof(ystats)); p_stats->iscsi_tx_data_pdu_cnt = HILO_64_REGPAIR(ystats.iscsi_tx_data_pdu_cnt); p_stats->iscsi_tx_r2t_pdu_cnt = HILO_64_REGPAIR(ystats.iscsi_tx_r2t_pdu_cnt); p_stats->iscsi_tx_total_pdu_cnt = HILO_64_REGPAIR(ystats.iscsi_tx_total_pdu_cnt); } static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_iscsi_stats *p_stats) { struct pstorm_iscsi_stats_drv pstats; u32 pstats_addr; memset(&pstats, 0, sizeof(pstats)); pstats_addr = BAR0_MAP_REG_PSDM_RAM + PSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); p_stats->iscsi_tx_bytes_cnt = HILO_64_REGPAIR(pstats.iscsi_tx_bytes_cnt); p_stats->iscsi_tx_packet_cnt = 
HILO_64_REGPAIR(pstats.iscsi_tx_packet_cnt); } static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn, struct qed_iscsi_stats *stats, bool is_atomic) { struct qed_ptt *p_ptt; memset(stats, 0, sizeof(*stats)); p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic); if (!p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); return -EAGAIN; } _qed_iscsi_get_tstats(p_hwfn, p_ptt, stats); _qed_iscsi_get_mstats(p_hwfn, p_ptt, stats); _qed_iscsi_get_ustats(p_hwfn, p_ptt, stats); _qed_iscsi_get_xstats(p_hwfn, p_ptt, stats); _qed_iscsi_get_ystats(p_hwfn, p_ptt, stats); _qed_iscsi_get_pstats(p_hwfn, p_ptt, stats); qed_ptt_release(p_hwfn, p_ptt); return 0; } struct qed_hash_iscsi_con { struct hlist_node node; struct qed_iscsi_conn *con; }; static int qed_fill_iscsi_dev_info(struct qed_dev *cdev, struct qed_dev_iscsi_info *info) { struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev); int rc; memset(info, 0, sizeof(*info)); rc = qed_fill_dev_info(cdev, &info->common); info->primary_dbq_rq_addr = qed_iscsi_get_primary_bdq_prod(hwfn, BDQ_ID_RQ); info->secondary_bdq_rq_addr = qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ); return rc; } static void qed_register_iscsi_ops(struct qed_dev *cdev, struct qed_iscsi_cb_ops *ops, void *cookie) { cdev->protocol_ops.iscsi = ops; cdev->ops_cookie = cookie; } static struct qed_hash_iscsi_con *qed_iscsi_get_hash(struct qed_dev *cdev, u32 handle) { struct qed_hash_iscsi_con *hash_con = NULL; if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) return NULL; hash_for_each_possible(cdev->connections, hash_con, node, handle) { if (hash_con->con->icid == handle) break; } if (!hash_con || (hash_con->con->icid != handle)) return NULL; return hash_con; } static int qed_iscsi_stop(struct qed_dev *cdev) { int rc; if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { DP_NOTICE(cdev, "iscsi already stopped\n"); return 0; } if (!hash_empty(cdev->connections)) { DP_NOTICE(cdev, "Can't stop iscsi - not all connections were returned\n"); return -EINVAL; } /* Stop the iscsi */ rc = qed_sp_iscsi_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK, NULL); cdev->flags &= ~QED_FLAG_STORAGE_STARTED; return rc; } static int qed_iscsi_start(struct qed_dev *cdev, struct qed_iscsi_tid *tasks, void *event_context, iscsi_event_cb_t async_event_cb) { int rc; struct qed_tid_mem *tid_info; if (cdev->flags & QED_FLAG_STORAGE_STARTED) { DP_NOTICE(cdev, "iscsi already started;\n"); return 0; } rc = qed_sp_iscsi_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK, NULL, event_context, async_event_cb); if (rc) { DP_NOTICE(cdev, "Failed to start iscsi\n"); return rc; } cdev->flags |= QED_FLAG_STORAGE_STARTED; hash_init(cdev->connections); if (!tasks) return 0; tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL); if (!tid_info) { qed_iscsi_stop(cdev); return -ENOMEM; } rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info); if (rc) { DP_NOTICE(cdev, "Failed to gather task information\n"); qed_iscsi_stop(cdev); kfree(tid_info); return rc; } /* Fill task information */ tasks->size = tid_info->tid_size; tasks->num_tids_per_block = tid_info->num_tids_per_block; memcpy(tasks->blocks, tid_info->blocks, MAX_TID_BLOCKS_ISCSI * sizeof(u8 *)); kfree(tid_info); return 0; } static int qed_iscsi_acquire_conn(struct qed_dev *cdev, u32 *handle, u32 *fw_cid, void __iomem **p_doorbell) { struct qed_hash_iscsi_con *hash_con; int rc; /* Allocate a hashed connection */ hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC); if (!hash_con) return -ENOMEM; /* Acquire the connection */ rc = 
qed_iscsi_acquire_connection(QED_AFFIN_HWFN(cdev), NULL, &hash_con->con); if (rc) { DP_NOTICE(cdev, "Failed to acquire Connection\n"); kfree(hash_con); return rc; } /* Added the connection to hash table */ *handle = hash_con->con->icid; *fw_cid = hash_con->con->fw_cid; hash_add(cdev->connections, &hash_con->node, *handle); if (p_doorbell) *p_doorbell = qed_iscsi_get_db_addr(QED_AFFIN_HWFN(cdev), *handle); return 0; } static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle) { struct qed_hash_iscsi_con *hash_con; hash_con = qed_iscsi_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } hlist_del(&hash_con->node); qed_iscsi_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con); kfree(hash_con); return 0; } static int qed_iscsi_offload_conn(struct qed_dev *cdev, u32 handle, struct qed_iscsi_params_offload *conn_info) { struct qed_hash_iscsi_con *hash_con; struct qed_iscsi_conn *con; hash_con = qed_iscsi_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } /* Update the connection with information from the params */ con = hash_con->con; ether_addr_copy(con->local_mac, conn_info->src.mac); ether_addr_copy(con->remote_mac, conn_info->dst.mac); memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip)); memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip)); con->local_port = conn_info->src.port; con->remote_port = conn_info->dst.port; con->layer_code = conn_info->layer_code; con->sq_pbl_addr = conn_info->sq_pbl_addr; con->initial_ack = conn_info->initial_ack; con->vlan_id = conn_info->vlan_id; con->tcp_flags = conn_info->tcp_flags; con->ip_version = conn_info->ip_version; con->default_cq = conn_info->default_cq; con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt; con->dup_ack_theshold = conn_info->dup_ack_theshold; con->rcv_next = conn_info->rcv_next; con->snd_una = conn_info->snd_una; con->snd_next = conn_info->snd_next; con->snd_max = conn_info->snd_max; con->snd_wnd = conn_info->snd_wnd; con->rcv_wnd = conn_info->rcv_wnd; con->snd_wl1 = conn_info->snd_wl1; con->cwnd = conn_info->cwnd; con->ss_thresh = conn_info->ss_thresh; con->srtt = conn_info->srtt; con->rtt_var = conn_info->rtt_var; con->ts_recent = conn_info->ts_recent; con->ts_recent_age = conn_info->ts_recent_age; con->total_rt = conn_info->total_rt; con->ka_timeout_delta = conn_info->ka_timeout_delta; con->rt_timeout_delta = conn_info->rt_timeout_delta; con->dup_ack_cnt = conn_info->dup_ack_cnt; con->snd_wnd_probe_cnt = conn_info->snd_wnd_probe_cnt; con->ka_probe_cnt = conn_info->ka_probe_cnt; con->rt_cnt = conn_info->rt_cnt; con->flow_label = conn_info->flow_label; con->ka_timeout = conn_info->ka_timeout; con->ka_interval = conn_info->ka_interval; con->max_rt_time = conn_info->max_rt_time; con->initial_rcv_wnd = conn_info->initial_rcv_wnd; con->ttl = conn_info->ttl; con->tos_or_tc = conn_info->tos_or_tc; con->remote_port = conn_info->remote_port; con->local_port = conn_info->local_port; con->mss = conn_info->mss; con->snd_wnd_scale = conn_info->snd_wnd_scale; con->rcv_wnd_scale = conn_info->rcv_wnd_scale; con->da_timeout_value = conn_info->da_timeout_value; con->ack_frequency = conn_info->ack_frequency; /* Set default values on other connection fields */ con->offl_flags = 0x1; return qed_sp_iscsi_conn_offload(QED_AFFIN_HWFN(cdev), con, QED_SPQ_MODE_EBLOCK, NULL); } static int qed_iscsi_update_conn(struct qed_dev *cdev, u32 handle, struct 
qed_iscsi_params_update *conn_info) { struct qed_hash_iscsi_con *hash_con; struct qed_iscsi_conn *con; hash_con = qed_iscsi_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } /* Update the connection with information from the params */ con = hash_con->con; con->update_flag = conn_info->update_flag; con->max_seq_size = conn_info->max_seq_size; con->max_recv_pdu_length = conn_info->max_recv_pdu_length; con->max_send_pdu_length = conn_info->max_send_pdu_length; con->first_seq_length = conn_info->first_seq_length; con->exp_stat_sn = conn_info->exp_stat_sn; return qed_sp_iscsi_conn_update(QED_AFFIN_HWFN(cdev), con, QED_SPQ_MODE_EBLOCK, NULL); } static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle) { struct qed_hash_iscsi_con *hash_con; hash_con = qed_iscsi_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } return qed_sp_iscsi_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con, QED_SPQ_MODE_EBLOCK, NULL); } static int qed_iscsi_destroy_conn(struct qed_dev *cdev, u32 handle, u8 abrt_conn) { struct qed_hash_iscsi_con *hash_con; hash_con = qed_iscsi_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } hash_con->con->abortive_dsconnect = abrt_conn; return qed_sp_iscsi_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con, QED_SPQ_MODE_EBLOCK, NULL); } static int qed_iscsi_stats_context(struct qed_dev *cdev, struct qed_iscsi_stats *stats, bool is_atomic) { return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); } static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) { return qed_iscsi_stats_context(cdev, stats, false); } static int qed_iscsi_change_mac(struct qed_dev *cdev, u32 handle, const u8 *mac) { struct qed_hash_iscsi_con *hash_con; hash_con = qed_iscsi_get_hash(cdev, handle); if (!hash_con) { DP_NOTICE(cdev, "Failed to find connection for handle %d\n", handle); return -EINVAL; } return qed_sp_iscsi_mac_update(QED_AFFIN_HWFN(cdev), hash_con->con, QED_SPQ_MODE_EBLOCK, NULL); } void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats, bool is_atomic) { struct qed_iscsi_stats proto_stats; /* Retrieve FW statistics */ memset(&proto_stats, 0, sizeof(proto_stats)); if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Failed to collect ISCSI statistics\n"); return; } /* Translate FW statistics into struct */ stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt; stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt; stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt; stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt; } static const struct qed_iscsi_ops qed_iscsi_ops_pass = { .common = &qed_common_ops_pass, .ll2 = &qed_ll2_ops_pass, .fill_dev_info = &qed_fill_iscsi_dev_info, .register_ops = &qed_register_iscsi_ops, .start = &qed_iscsi_start, .stop = &qed_iscsi_stop, .acquire_conn = &qed_iscsi_acquire_conn, .release_conn = &qed_iscsi_release_conn, .offload_conn = &qed_iscsi_offload_conn, .update_conn = &qed_iscsi_update_conn, .destroy_conn = &qed_iscsi_destroy_conn, .clear_sq = &qed_iscsi_clear_conn_sq, .get_stats = &qed_iscsi_stats, .change_mac = &qed_iscsi_change_mac, }; const struct qed_iscsi_ops *qed_get_iscsi_ops(void) { return &qed_iscsi_ops_pass; } EXPORT_SYMBOL(qed_get_iscsi_ops); void qed_put_iscsi_ops(void) { } EXPORT_SYMBOL(qed_put_iscsi_ops);
linux-master
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
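One detail of qed_iscsi.c worth calling out is how qed_sp_iscsi_conn_offload() (and, byte by byte, qed_sp_iscsi_mac_update()) hands MAC addresses to the firmware: each 6-byte address is split into three 16-bit words, hi/mid/lo, where every word is the big-endian value of one byte pair and is then stored little-endian via cpu_to_le16() for the device. The sketch below reproduces only the byte-pair packing in portable, host-order C; the final endian conversion is left out and the function name is illustrative, not part of the driver.

/* Sketch of the hi/mid/lo MAC packing; host-order only, name is invented. */
#include <stdint.h>
#include <stdio.h>

static void mac_to_words(const uint8_t mac[6],
                         uint16_t *hi, uint16_t *mid, uint16_t *lo)
{
        /* mac[0] is the most significant byte of the "hi" word, as with
         * get_unaligned_be16() on each byte pair in the driver. */
        *hi  = (uint16_t)(mac[0] << 8 | mac[1]);
        *mid = (uint16_t)(mac[2] << 8 | mac[3]);
        *lo  = (uint16_t)(mac[4] << 8 | mac[5]);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };
        uint16_t hi, mid, lo;

        mac_to_words(mac, &hi, &mid, &lo);
        printf("hi=0x%04x mid=0x%04x lo=0x%04x\n", hi, mid, lo);
        return 0;
}

For the sample address 00:0e:1e:aa:bb:cc this prints hi=0x000e mid=0x1eaa lo=0xbbcc, matching what each get_unaligned_be16() call produces before the driver's cpu_to_le16() store.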
// SPDX-License-Identifier: GPL-2.0-or-later /* Marvell/Qlogic FastLinQ NIC driver * * Copyright (C) 2020 Marvell International Ltd. */ #include <linux/kernel.h> #include <linux/qed/qed_if.h> #include <linux/vmalloc.h> #include "qed.h" #include "qed_devlink.h" enum qed_devlink_param_id { QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, QED_DEVLINK_PARAM_ID_IWARP_CMT, }; struct qed_fw_fatal_ctx { enum qed_hw_err_type err_type; }; int qed_report_fatal_error(struct devlink *devlink, enum qed_hw_err_type err_type) { struct qed_devlink *qdl = devlink_priv(devlink); struct qed_fw_fatal_ctx fw_fatal_ctx = { .err_type = err_type, }; if (qdl->fw_reporter) devlink_health_report(qdl->fw_reporter, "Fatal error occurred", &fw_fatal_ctx); return 0; } static int qed_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, void *priv_ctx, struct netlink_ext_ack *extack) { struct qed_devlink *qdl = devlink_health_reporter_priv(reporter); struct qed_fw_fatal_ctx *fw_fatal_ctx = priv_ctx; struct qed_dev *cdev = qdl->cdev; u32 dbg_data_buf_size; u8 *p_dbg_data_buf; int err; /* Having context means that was a dump request after fatal, * so we enable extra debugging while gathering the dump, * just in case */ cdev->print_dbg_data = fw_fatal_ctx ? true : false; dbg_data_buf_size = qed_dbg_all_data_size(cdev); p_dbg_data_buf = vzalloc(dbg_data_buf_size); if (!p_dbg_data_buf) { DP_NOTICE(cdev, "Failed to allocate memory for a debug data buffer\n"); return -ENOMEM; } err = qed_dbg_all_data(cdev, p_dbg_data_buf); if (err) { DP_NOTICE(cdev, "Failed to obtain debug data\n"); vfree(p_dbg_data_buf); return err; } err = devlink_fmsg_binary_pair_put(fmsg, "dump_data", p_dbg_data_buf, dbg_data_buf_size); vfree(p_dbg_data_buf); return err; } static int qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter, void *priv_ctx, struct netlink_ext_ack *extack) { struct qed_devlink *qdl = devlink_health_reporter_priv(reporter); struct qed_dev *cdev = qdl->cdev; qed_recovery_process(cdev); return 0; } static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = { .name = "fw_fatal", .recover = qed_fw_fatal_reporter_recover, .dump = qed_fw_fatal_reporter_dump, }; #define QED_REPORTER_FW_GRACEFUL_PERIOD 0 void qed_fw_reporters_create(struct devlink *devlink) { struct qed_devlink *dl = devlink_priv(devlink); dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops, QED_REPORTER_FW_GRACEFUL_PERIOD, dl); if (IS_ERR(dl->fw_reporter)) { DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n", PTR_ERR(dl->fw_reporter)); dl->fw_reporter = NULL; } } void qed_fw_reporters_destroy(struct devlink *devlink) { struct qed_devlink *dl = devlink_priv(devlink); struct devlink_health_reporter *rep; rep = dl->fw_reporter; if (!IS_ERR_OR_NULL(rep)) devlink_health_reporter_destroy(rep); } static int qed_dl_param_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { struct qed_devlink *qed_dl = devlink_priv(dl); struct qed_dev *cdev; cdev = qed_dl->cdev; ctx->val.vbool = cdev->iwarp_cmt; return 0; } static int qed_dl_param_set(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { struct qed_devlink *qed_dl = devlink_priv(dl); struct qed_dev *cdev; cdev = qed_dl->cdev; cdev->iwarp_cmt = ctx->val.vbool; return 0; } static const struct devlink_param qed_devlink_params[] = { DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT, "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL, BIT(DEVLINK_PARAM_CMODE_RUNTIME), qed_dl_param_get, 
qed_dl_param_set, NULL), }; static int qed_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) { struct qed_devlink *qed_dl = devlink_priv(devlink); struct qed_dev *cdev = qed_dl->cdev; struct qed_dev_info *dev_info; char buf[100]; int err; dev_info = &cdev->common_dev_info; memcpy(buf, cdev->hwfns[0].hw_info.part_num, sizeof(cdev->hwfns[0].hw_info.part_num)); buf[sizeof(cdev->hwfns[0].hw_info.part_num)] = 0; if (buf[0]) { err = devlink_info_board_serial_number_put(req, buf); if (err) return err; } snprintf(buf, sizeof(buf), "%d.%d.%d.%d", GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_3), GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_2), GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_1), GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_0)); err = devlink_info_version_stored_put(req, DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, buf); if (err) return err; snprintf(buf, sizeof(buf), "%d.%d.%d.%d", dev_info->fw_major, dev_info->fw_minor, dev_info->fw_rev, dev_info->fw_eng); return devlink_info_version_running_put(req, DEVLINK_INFO_VERSION_GENERIC_FW_APP, buf); } static const struct devlink_ops qed_dl_ops = { .info_get = qed_devlink_info_get, }; struct devlink *qed_devlink_register(struct qed_dev *cdev) { struct qed_devlink *qdevlink; struct devlink *dl; int rc; dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink), &cdev->pdev->dev); if (!dl) return ERR_PTR(-ENOMEM); qdevlink = devlink_priv(dl); qdevlink->cdev = cdev; rc = devlink_params_register(dl, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); if (rc) goto err_unregister; cdev->iwarp_cmt = false; qed_fw_reporters_create(dl); devlink_register(dl); return dl; err_unregister: devlink_free(dl); return ERR_PTR(rc); } void qed_devlink_unregister(struct devlink *devlink) { if (!devlink) return; devlink_unregister(devlink); qed_fw_reporters_destroy(devlink); devlink_params_unregister(devlink, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); devlink_free(devlink); }
linux-master
drivers/net/ethernet/qlogic/qed/qed_devlink.c
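qed_devlink.c above registers a "fw_fatal" devlink health reporter whose dump callback sizes a buffer with qed_dbg_all_data_size(), fills it with qed_dbg_all_data() and attaches it to the report, and whose recover callback kicks off qed_recovery_process(). The program below is only a userspace model of that callback-table pattern; the reporter, reporter_ops and report_error names are invented stand-ins rather than the devlink API, and the "dump" is a placeholder string instead of real register data.

/* Toy model of a named health reporter with dump and recover callbacks. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct reporter;

struct reporter_ops {
        const char *name;
        int (*dump)(struct reporter *r, void **buf, size_t *len);
        int (*recover)(struct reporter *r);
};

struct reporter {
        const struct reporter_ops *ops;
        void *priv;             /* device context, like qed_devlink in the driver */
};

/* "Dump": size a buffer, then fill it, mirroring the size-then-collect flow. */
static int fw_fatal_dump(struct reporter *r, void **buf, size_t *len)
{
        const char *blob = "debug data placeholder";

        (void)r;
        *len = strlen(blob) + 1;
        *buf = malloc(*len);
        if (!*buf)
                return -1;
        memcpy(*buf, blob, *len);
        return 0;
}

/* "Recover": attempt to restore the device identified by the private context. */
static int fw_fatal_recover(struct reporter *r)
{
        printf("recovering device %s\n", (const char *)r->priv);
        return 0;
}

static const struct reporter_ops fw_fatal_ops = {
        .name    = "fw_fatal",
        .dump    = fw_fatal_dump,
        .recover = fw_fatal_recover,
};

/* Report an error: gather a dump if possible, then try to recover. */
static void report_error(struct reporter *r, const char *msg)
{
        void *buf = NULL;
        size_t len = 0;

        printf("%s reporter: %s\n", r->ops->name, msg);
        if (!r->ops->dump(r, &buf, &len))
                printf("collected %zu byte dump\n", len);
        free(buf);
        r->ops->recover(r);
}

int main(void)
{
        struct reporter r = { .ops = &fw_fatal_ops, .priv = "pci/0000:01:00.0" };

        report_error(&r, "fatal firmware error");
        return 0;
}

On a real system the reports produced by the driver are typically read back with the iproute2 devlink tool ("devlink health" family of commands) rather than anything shown in this sketch.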